Diffstat
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/cache.json               | 1580
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/floating-point.json      |  162
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/frontend.json            |  365
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json         |  400
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/memory.json              |  480
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/other.json               |   60
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/pipeline.json            | 1554
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json        | 1900
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json |  824
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json       |  445
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json        | 1538
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json        |  351
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json      |  180
13 files changed, 7292 insertions(+), 2547 deletions(-)
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index 52dc6ef40e63..f98649fb92b4 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -1,1290 +1,1266 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
"Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "BriefDescription": "L1D data line replacements.",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xBF",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x5"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load uops retired",
- "EventCode": "0xD0",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of store uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "L1D miss oustandings duration in cycles.",
+ "Counter": "2",
+ "CounterHTOff": "2",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "CounterHTOff": "2",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "CounterHTOff": "2",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD2",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
- "EventCode": "0xD2",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
- "EventCode": "0xD2",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD2",
+ "BriefDescription": "L2 cache lines filling L2.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x7"
},
{
- "EventCode": "0xD3",
+ "BriefDescription": "L2 cache lines in E state filling L2.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD3",
+ "BriefDescription": "L2 cache lines in I state filling L2.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
- "EventCode": "0x51",
+ "BriefDescription": "L2 cache lines in S state filling L2.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D.ALLOCATED_IN_M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Allocated L1D data cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L1D.EVICTION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "Dirty L2 cache lines filling the L2.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D.ALL_M_REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles.",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa"
},
{
- "EventCode": "0x63",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "L2 code requests.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "Demand Data Read requests.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "RFO requests to L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc"
},
{
- "EventCode": "0xB0",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xB0",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xB0",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xB2",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFO requests that hit L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFOs that access cache lines in any state.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PF_HIT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "RFOs that miss cache lines.",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PF_MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "Transactions accessing L2 pipe.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 cache accesses when fetching instructions.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "Demand Data Read requests that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L1D writebacks that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
"SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L2 fill requests that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L2 writebacks that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "RFO requests that access L2 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "Cycles when L1D is locked.",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x63",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
- "EventCode": "0xF1",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "All retired load uops.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "All retired store uops.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Retired load uops with locked access.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines filling the L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x11"
},
{
- "EventCode": "0x2E",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x12"
},
{
- "EventCode": "0x2E",
+ "BriefDescription": "Demand and prefetch data reads.",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xF4",
+ "BriefDescription": "Cacheable and noncachaeble code read requests.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "Demand Data Read requests sent to uncore.",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xBF",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000105B3",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0091",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0091",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0091",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0091",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0091",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0091",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090",
+ "BriefDescription": "Counts all prefetch data reads that hit the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0090",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0090",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0090",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0090",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0090",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0090",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0090",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0090",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c03f7",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000107F7",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c03f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c03f7",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c03f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c03f7",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c03f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c03f7",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c03f7",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c03f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c03f7",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10008",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010122",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all writebacks from the core to the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010004",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0004",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001",
+ "BriefDescription": "Counts all demand data reads",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0001",
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0001",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0001",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0001",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x803c8000",
+ "BriefDescription": "Counts all demand rfo's",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010002",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803c8000",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x23ffc08000",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23ffc08000",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0040",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0200",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x3f803c0080",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x10003c0080",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0080",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x4003c0080",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts non-temporal stores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010008",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x1003c0080",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010004",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0080",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000105B3",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010122",
+ "BriefDescription": "Counts non-temporal stores",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000107F7",
+ "BriefDescription": "Split locks in SQ.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
index 982eda48785e..eb2ff2cfdf6b 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
@@ -1,138 +1,138 @@
[
{
- "EventCode": "0xC1",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
},
{
- "EventCode": "0xC1",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC1",
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCA",
+ "BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xCA",
+ "BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_COMP_OPS_EXE.X87",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x11",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SIMD_FP_256.PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x11",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
+ "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
index 1b7b1dd36c68..0b4dbce2f1c0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -1,305 +1,314 @@
[
{
- "EventCode": "0x80",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
- "EventCode": "0x80",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
- "EventCode": "0x79",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
- "EventCode": "0x9C",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "EventCode": "0x9C",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
},
{
- "EventCode": "0x9C",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
- "EventCode": "0xAB",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
- "EventCode": "0xAB",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xAC",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB_FILL.OTHER_CANCEL",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xAC",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x79",
"EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0x9C",
- "Invert": "1",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x30"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterHTOff": "0,1,2,3",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterHTOff": "0,1,2,3",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xAC",
+ "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "DSB_FILL.ALL_CANCEL",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Invert": "1",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterHTOff": "0,1,2,3",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index dbb33e00b72a..554f87c03c05 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,193 +1,385 @@
[
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / SLOTS",
+ "MetricGroup": "PGO;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / SLOTS",
+ "MetricGroup": "Frontend;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / CLKS",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / CLKS",
+ "MetricGroup": "FetchLat;TopdownL3;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / CLKS",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_fetch_latency_group",
+ "MetricName": "tma_dsb_switches",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / CLKS",
+ "MetricGroup": "FetchLat;TopdownL3;tma_fetch_latency_group",
+ "MetricName": "tma_lcp",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / CLKS",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_fetch_latency_group",
+ "MetricName": "tma_ms_switches",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_bandwidth",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ((INT_MISC.RECOVERY_CYCLES_ANY / 2) if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / SLOTS",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "(BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if (IPC > 1.8) else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if (tma_fetch_latency > 0.1) else RESOURCE_STALLS.SB)) * tma_backend_bound",
+ "MetricGroup": "Backend;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / CLKS",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricExpr": "(1 - (MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS))) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / CLKS",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_dram_bound_group",
+ "MetricName": "tma_mem_bandwidth",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / CLKS - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_dram_bound_group",
+ "MetricName": "tma_mem_latency",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / CLKS",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / CORE_CLKS",
+ "MetricGroup": "TopdownL3;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if (IPC > 1.8) else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if (tma_fetch_latency > 0.1) else RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING)) / CLKS",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / SLOTS",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS * FP_COMP_OPS_EXE.X87 / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_fp_arith_group",
+ "MetricName": "tma_fp_scalar",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_fp_arith_group",
+ "MetricName": "tma_fp_vector",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY) * IDQ.MS_UOPS / SLOTS",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_heavy_operations_group",
+ "MetricName": "tma_microcode_sequencer",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
+ "MetricExpr": "INST_RETIRED.ANY / CLKS",
+ "MetricGroup": "Ret;Summary",
"MetricName": "IPC"
},
{
"BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
+ "MetricGroup": "Pipeline;Ret;Retire",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
- },
- {
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
- },
- {
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
+ "MetricExpr": "1 / IPC",
+ "MetricGroup": "Mem;Pipeline",
"MetricName": "CPI"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
+ "MetricGroup": "Pipeline",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * CORE_CLKS",
+ "MetricGroup": "tma_L1_group",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "Execute_per_Issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
- "BriefDescription": "Total number of retired Instructions",
- "MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
- },
- {
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / CORE_CLKS",
+ "MetricGroup": "Ret;SMT;tma_L1_group",
"MetricName": "CoreIPC"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
- },
- {
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
+ "MetricExpr": "(1 * (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / CORE_CLKS",
+ "MetricGroup": "Flops;Ret",
"MetricName": "FLOPc"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
- },
- {
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / ((cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
+ "MetricExpr": "((CPU_CLK_UNHALTED.THREAD / 2) * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK)) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2) if #SMT_on else CLKS",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
+ "BriefDescription": "Total number of retired Instructions Sample with: INST_RETIRED.PREC_DIST",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;tma_L1_group",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ((IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS))",
+ "MetricGroup": "DSB;Fed;FetchBW",
+ "MetricName": "DSB_Coverage"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
+ "MetricGroup": "HPC;Summary",
"MetricName": "CPU_Utilization"
},
{
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "Turbo_Utilization * msr@tsc@ / 1000000000 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "Average_Frequency"
+ },
+ {
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "MetricExpr": "((1 * (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1000000000) / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CLKS / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0",
+ "MetricGroup": "SMT",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
"MetricName": "Kernel_Utilization"
},
{
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_CPI"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
+ "MetricExpr": "(64 * (uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@) / 1000000000) / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
- "MetricGroup": "",
+ "MetricGroup": "SoC",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch"
+ },
+ {
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
@@ -228,5 +420,11 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "Socket_CLKS / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
}
]
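A note on the renamed metrics above: the tma_* entries form the Top-down Microarchitecture Analysis hierarchy (TopdownL1 through L4 groups), and perf evaluates each MetricExpr directly from the listed events, with the SMT-aware CORE_CLKS/SLOTS helpers replacing the old *_SMT metric variants. As a rough illustrative sketch only (the counter values below are made up and the helper functions are not part of this patch), the two level-1 formulas can be reproduced in Python:

def slots(core_clks):
    # SLOTS = 4 * CORE_CLKS (issue-pipeline width of 4 on this core)
    return 4 * core_clks

def tma_retiring(retire_slots, core_clks):
    # UOPS_RETIRED.RETIRE_SLOTS / SLOTS
    return retire_slots / slots(core_clks)

def tma_bad_speculation(uops_issued, retire_slots, recovery_cycles, core_clks, smt_on=False):
    # (UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS
    #  + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if SMT else INT_MISC.RECOVERY_CYCLES)) / SLOTS
    recovery = recovery_cycles / 2 if smt_on else recovery_cycles
    return (uops_issued - retire_slots + 4 * recovery) / slots(core_clks)

# Hypothetical sample: 1e9 core clocks, 3.2e9 issued uops, 3.0e9 retired slots,
# 2e7 recovery cycles.
print(round(tma_retiring(3.0e9, 1e9), 3))                      # 0.75
print(round(tma_bad_speculation(3.2e9, 3.0e9, 2e7, 1e9), 3))   # 0.07

Recent perf versions can compute these by name (for example perf stat -M tma_retiring), so the manual arithmetic above is only for illustration of the expressions in this file.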
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/memory.json b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
index 27e636428f4f..23756ca9b7da 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
@@ -1,422 +1,422 @@
[
{
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x4",
+ "BriefDescription": "Loads with latency value being above 128.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4 .",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1009",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
+ "BriefDescription": "Loads with latency value being above 16.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8.",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "20011",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x10",
+ "BriefDescription": "Loads with latency value being above 256.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16.",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "503",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x20",
+ "BriefDescription": "Loads with latency value being above 32.",
"Counter": "3",
- "UMask": "0x1",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32.",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x40",
+ "BriefDescription": "Loads with latency value being above 4 .",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64.",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "100003",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x80",
+ "BriefDescription": "Loads with latency value being above 512.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128.",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "101",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x100",
+ "BriefDescription": "Loads with latency value being above 64.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256.",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "2003",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x200",
+ "BriefDescription": "Loads with latency value being above 8.",
"Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512.",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "50021",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
"Counter": "3",
- "UMask": "0x2",
+ "CounterHTOff": "3",
+ "EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
+ "PEBS": "2",
"PRECISE_STORE": "1",
+ "SampleAfterValue": "2000003",
"TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
},
{
- "EventCode": "0x05",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x05",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20004",
+ "BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC20077",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400077",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400004",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x187FC20077",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads that miss the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20004",
"Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400004",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800004",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800004",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820004",
+ "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00004",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00004",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820004",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00001",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20001",
+ "BriefDescription": "Counts demand data reads that miss in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400001",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800001",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820001",
+ "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00001",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20040",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20040",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x67f800010",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "MSRValue": "0x107fc00010",
"Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400077",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820010",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC20077",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20200",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x187FC20077",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20080",
+ "Offcore": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
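The reordered MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above keep their semantics: MSRValue is the load-latency threshold (in core cycles) programmed into the MSR named by MSRIndex (0x3F6), PEBS=2 marks them as precise-only events restricted to counter 3, and SampleAfterValue shrinks as the threshold grows because longer-latency loads are rarer. A small sketch that extracts those thresholds from the JSON (the kernel-tree path is an assumption about where the file lives):

import json

# List the load-latency threshold events from the pmu-events file shown above.
path = "tools/perf/pmu-events/arch/x86/jaketown/memory.json"
with open(path) as f:
    events = json.load(f)

for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
        # MSRValue is the latency threshold written to the MSR given by MSRIndex.
        threshold = int(ev["MSRValue"], 16)
        print(f"{name}: threshold={threshold} cycles, "
              f"sample period={ev['SampleAfterValue']}")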
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
index 64b195b82c50..2f873ab14156 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/other.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -1,58 +1,58 @@
[
{
- "EventCode": "0x17",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Valid instructions written to IQ per cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x4E",
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4E",
"EventName": "HW_PRE_REQ.DL1_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x17",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x63",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 783a5b4a67b1..61a3db4d67d5 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -1,1216 +1,1202 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB6",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "BriefDescription": "Divide operations executed.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV",
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Speculative and retired branches.",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "Speculative and retired direct near calls.",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "Speculative and retired indirect return branches.",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc8"
},
{
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Taken speculative and retired direct near calls.",
"Counter": "0,1,2,3",
- "UMask": "0x90",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x90"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
"Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "All (macro) branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
"Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Conditional branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Far branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Return instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Taken branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xA8",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "EventCode": "0xA8",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "EventCode": "0x0D",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x59",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
- "EventCode": "0x59",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x59",
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Mispredicted not taken branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Mispredicted taken branch instructions retired.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RESOURCE_STALLS.LB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "RESOURCE_STALLS2.BOB_FULL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "CounterHTOff": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "CounterHTOff": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "CounterHTOff": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
+ "CounterHTOff": "2",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of micro-ops retired.",
- "EventCode": "0xC2",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
- "EventCode": "0xC2",
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "Counter": "2",
+ "CounterHTOff": "2",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x6"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x5"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
+ "BriefDescription": "Stall cycles because IQ is full.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC4",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
+ "CounterHTOff": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "Counter": "1",
+ "CounterHTOff": "1",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "2000003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC5",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PEBS": "2",
- "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "EventCode": "0xC5",
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC1",
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired instructions experiencing ITLB misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x14",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed.",
- "EventCode": "0x14",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "ARITH.FPU_DIV",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched per thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched from any thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "2",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "6",
- "CounterHTOff": "2"
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x4C",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x4C",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x03",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
- "EventCode": "0x03",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x03",
+ "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x03",
+ "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
- "EventCode": "0x07",
+ "BriefDescription": "Resource-related stall cycles.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x07",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.LB",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB6",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "AGU_BYPASS_CANCEL.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.LB_SB",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xa"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xf0"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles with either free list is empty.",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.BOB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Resource stalls out of order resources full.",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xA1",
+ "BriefDescription": "Count cases of saving new LBR.",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
- "TakenAlone": "1",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls2 control structures full for physical registers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
+ "BriefDescription": "Uops dispatched from any thread.",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with either free list is empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Uops dispatched per thread.",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "RESOURCE_STALLS.MEM_RS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
"Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls out of order resources full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "RESOURCE_STALLS.LB_SB",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
- "EventCode": "0x59",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0x0D",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xE6",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x88",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA8",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xc3",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Actually retired uops.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "BriefDescription": "Cycles without actually retired uops.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "EventCode": "0x3C",
+ "BriefDescription": "Retirement slots used.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Cycles without actually retired uops.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "CounterHTOff": "0,1,2,3",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
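
Editor's note, illustrative only and not part of the patch: the core events defined in pipeline.json above become usable by name once these JSON tables are compiled into perf (for example, perf stat -e uops_dispatched_port.port_0 -- <workload>). As a minimal sketch of what an EventCode/UMask pair means at the hardware level, the C program below opens UOPS_DISPATCHED_PORT.PORT_0 (EventCode 0xA1, UMask 0x1, taken from the table above) as a raw event via perf_event_open(2). The (UMask << 8) | EventCode layout is the conventional raw-config encoding for Intel core PMU events; the program itself, including its output text, is an assumption-laden example rather than anything shipped with this change.

    /*
     * Illustrative sketch: count UOPS_DISPATCHED_PORT.PORT_0
     * (EventCode 0xA1, UMask 0x1) for the calling process.
     */
    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.config = (0x01ULL << 8) | 0xa1;  /* UMask 0x1, EventCode 0xA1 */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0 /* this process */, -1 /* any CPU */, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload of interest here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("UOPS_DISPATCHED_PORT.PORT_0: %llu\n",
                   (unsigned long long)count);
        close(fd);
        return 0;
    }

The uncore (CBO) events that follow need the separate uncore_cbox_* PMUs and their filter registers (see the Cn_MSR_PMON_BOX_FILTER notes in the descriptions below), so they are not reachable through the simple raw encoding shown here.
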
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
index 3fa61d962607..351f8b040ed1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
@@ -1,210 +1,1946 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; RTID",
"Counter": "0,1",
"EventCode": "0x34",
- "EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x11",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
"Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x40",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
+ "BriefDescription": "Lines Victimized; Lines in S State",
"Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x8",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in JKT. Therefore, if one wants to monitor the 'Even' ring, they should select both UP_EVEN and DN_EVEN. To monitor the 'Odd' ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0xf",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; VFIFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; VFIFO",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
+ "BriefDescription": "TOR Inserts; Miss All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0xa",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
- "Filter": "filter_opc=0x195",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4a",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x43",
"Unit": "CBO"
},
{
- "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x50",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
+ "BriefDescription": "TOR Inserts; Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x1e5",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
+ "BriefDescription": "TOR Inserts; Writebacks",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for all LLC misses; we divide this by UNC_C_CLOCKTICKS to get average Q depth",
+ "BriefDescription": "TOR Occupancy; Miss All",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
- "Filter": "filter_opc=0x182",
- "MetricExpr": "(UNC_C_TOR_OCCUPANCY.MISS_ALL / UNC_C_CLOCKTICKS) * 100.",
- "MetricName": "tor_occupancy.miss_all %",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0xa",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "read requests to home agent",
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4a",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA to iMC bypass was attempted. This is a latency optimization for situations when there is a light load on the memory subsystem. This can be filtered by whether the bypass was taken or not.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA to iMC bypass was attempted. This is a latency optimization for situations when there is a light load on the memory subsystem. This can be filtered by whether the bypass was taken or not.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Conflict Detected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; No Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.NO_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core was overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0xc",
"Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Allocations; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_H_TRACKER_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the local HA tracker pool. This can be used in conjunction with the occupancy accumulation event in order to calculate average latency. One cannot filter between reads and writes. HA trackers are allocated as soon as a request enters the HA and are released after the snoop response and data return (or posting, in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Snoops",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_H_TxR_AK_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound NDR transactions sent on the AK ring. NDR stands for 'non-data response' and is generally used for completions that do not include data. AK NDR is used for messages to the local socket.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
index 1b53c0e609e3..750870fd1cb1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
@@ -1,48 +1,850 @@
[
{
- "BriefDescription": "QPI clock ticks. Used to get percentages of QPI cycles events",
+ "BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
+        "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the 'GT/s' speed of the QPI link. For example, an 8GT/s link will have qfclk of 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
+ "BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "rxl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x40",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
+ "BriefDescription": "Stalls Sending to R3QPI; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "txl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of data flits transmitted ",
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
"UMask": "0x2",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted ",
+ "BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
"Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waiting to be returned across the link.",
+ "Unit": "QPI LL"
}
]
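
The UNC_Q_TxL_FLITS_G0 descriptions above spell out the conversion from flit counts to bandwidth: link bandwidth is flits * 80b / time, and data bandwidth is data flits * 8B / time in L0 (4B in L0p). A minimal Python sketch of that arithmetic follows; the function and parameter names are illustrative rather than taken from the event files, and the counter deltas are assumed to come from a measurement of the corresponding events over a known interval:

def qpi_tx_bandwidth(data_flits, non_data_flits, idle_flits, seconds, half_width=False):
    """Convert QPI Tx flit-count deltas into bandwidth in bytes/second.

    data_flits / non_data_flits / idle_flits: deltas of
    UNC_Q_TxL_FLITS_G0.DATA / .NON_DATA / .IDLE over the interval.
    half_width: True if the link spent the interval in L0p mode.
    """
    total_flits = data_flits + non_data_flits + idle_flits
    link_bw = total_flits * 80 / 8 / seconds        # 80 bits carried per flit
    payload = 4 if half_width else 8                # bytes of payload per data flit, per the description
    data_bw = data_flits * payload / seconds
    return link_bw, data_bw
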
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
index 8551cebeba23..a165a77947a0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
@@ -1,82 +1,493 @@
[
{
- "BriefDescription": "Memory page activates",
+ "BriefDescription": "DRAM Activate Count",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"Unit": "iMC"
},
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks. Used to get percentages of memory controller cycles events",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "uclks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Uncore Fixed Counter - uclks",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 1; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 2; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 3; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 4; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 5; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 6; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 7; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory page conflicts",
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Occupancy counter for memory read queue",
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies. So, we provide filtering based on whether the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
}
]
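The WPQ descriptions above point out that the occupancy accumulator, not-empty, and insert counts can be combined into average occupancy and average latency figures. A minimal sketch of that arithmetic in Python, assuming hypothetical counter readings for UNC_M_WPQ_OCCUPANCY, UNC_M_WPQ_CYCLES_NE, and UNC_M_WPQ_INSERTS:

```python
# Illustrative only: derive average WPQ occupancy and latency from the
# iMC counters described above. The sample values are hypothetical.
wpq_occupancy = 12_000_000  # UNC_M_WPQ_OCCUPANCY (occupancy accumulated per cycle)
wpq_cycles_ne = 3_000_000   # UNC_M_WPQ_CYCLES_NE (cycles the queue was not empty)
wpq_inserts = 400_000       # UNC_M_WPQ_INSERTS  (allocations into the queue)

# Average occupancy over the cycles the queue was non-empty.
avg_occupancy = wpq_occupancy / wpq_cycles_ne

# Average residency of an entry, in iMC (uclk) cycles: occupancy / insert rate.
avg_latency_cycles = wpq_occupancy / wpq_inserts

print(f"avg WPQ occupancy: {avg_occupancy:.2f} entries")
print(f"avg WPQ latency:   {avg_latency_cycles:.2f} uclk cycles")
```

The same occupancy/not-empty/inserts pattern applies to the IRP and R2PCIe/R3QPI ingress queues documented in the file below.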
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json
new file mode 100644
index 000000000000..588549a668bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json
@@ -0,0 +1,1538 @@
+[
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_RxR_AK_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_RxR_AK_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the AK Ingress in each cycles. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Ownership Lost",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Data Returned",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ordering Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Bypassed",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_BYPASSED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with no VNA credits available",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 4B Requests",
+ "Counter": "0,1",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.4B",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 8B Requests",
+ "Counter": "0,1",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.8B",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; ACK to Deassert",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ACK_TO_DEASSERT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS.COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ }
+]
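Several of the R3QPI descriptions above note that the Ingress Allocations (event 0x11) and Ingress Occupancy (event 0x13) pairs can be combined to derive an average queuing latency. A minimal usage sketch follows, assuming these JSON entries are exposed by perf under their lowercased EventName aliases (the exact uncore PMU spelling and availability can differ between systems and perf versions):

    # Hedged sketch: average R3QPI ingress queue latency for the NDR ring.
    # By Little's law, latency in uclk cycles ~= occupancy-cycles / allocations.
    perf stat -a -e unc_r3_rxr_occupancy.ndr,unc_r3_rxr_inserts.ndr -- sleep 10
    # avg_latency = UNC_R3_RxR_OCCUPANCY.NDR / UNC_R3_RxR_INSERTS.NDR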
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 8755693d86c6..817ea6d7f785 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -1,272 +1,361 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transistioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x8",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0xa",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Current Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x7",
- "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_current_cycles %",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Memory Phase Shedding Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Cycles Changing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Cycles Decreasing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Cycles Increasing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
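The MetricExpr fields removed above all took the form (EVENT / UNC_P_CLOCKTICKS) * 100; the same percentages can still be derived by hand, since UNC_P_CLOCKTICKS counts the PCU's fixed 800 MHz pclk and therefore serves as the time base. A minimal sketch, again assuming the events are reachable through their lowercased EventName aliases (exact naming may vary by perf version):

    # Hedged sketch: percentage of PCU cycles spent in frequency transitions.
    perf stat -a -e unc_p_clockticks,unc_p_freq_trans_cycles -- sleep 10
    # freq_trans_cycles % = 100 * UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS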
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
index a654ab771fce..98362abba1a7 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
@@ -1,149 +1,149 @@
[
{
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x85",
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
- "EventCode": "0x85",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x85",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x08",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x08",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
- "EventCode": "0x08",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
- "EventCode": "0x08",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4F",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xBD",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+      "PublicDescription": "This event counts cycles when the Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xBD",
+ "BriefDescription": "STLB flush attempts.",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
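
Once perf picks up this JSON, the events above are available by name (for example perf stat -e dtlb_load_misses.miss_causes_a_walk), but the EventCode/UMask pairs can also be programmed directly as raw core events using the usual Intel encoding of UMask << 8 | EventCode. A minimal sketch, assuming that encoding and an illustrative strided workload chosen purely to provoke DTLB load walks:

/* Hedged sketch: count DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK (EventCode 0x08,
 * UMask 0x01 from the hunk above) as a raw core event around a toy workload. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x01U << 8) | 0x08;	/* UMask << 8 | EventCode */
	attr.disabled = 1;
	attr.exclude_kernel = 1;		/* user-space walks only */

	int fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Toy workload: touch one byte per 4K page across a large buffer. */
	size_t len = 64 * 1024 * 1024;
	volatile char *buf = malloc(len);
	if (!buf)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (size_t i = 0; i < len; i += 4096)
		buf[i] = (char)i;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t walks;
	if (read(fd, &walks, sizeof(walks)) == sizeof(walks))
		printf("DTLB load misses causing walks: %llu\n",
		       (unsigned long long)walks);

	free((void *)buf);
	close(fd);
	return 0;
}

The same pattern applies to the store-side and ITLB events in the hunk above by substituting their EventCode and UMask values into attr.config.
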