|   |   |   |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000 |
| commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch) | |
| tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/perf/pmu-events/arch/powerpc/power8 | |
| parent | Initial commit. (diff) | |
| download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip | |

Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/pmu-events/arch/powerpc/power8')

|   |   |   |
|---|---|---|
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/cache.json | 176 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/floating-point.json | 14 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/frontend.json | 470 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/marked.json | 794 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/memory.json | 212 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/metrics.json | 2245 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/other.json | 3446 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/pipeline.json | 350 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/pmc.json | 140 |
| -rw-r--r-- | tools/perf/pmu-events/arch/powerpc/power8/translation.json | 176 |

10 files changed, 8023 insertions(+), 0 deletions(-)
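
Each of the added files is a JSON array of POWER8 PMU event descriptors; as the diff below shows, every entry carries the same four fields (EventCode, EventName, BriefDescription, PublicDescription), which the perf pmu-events build machinery uses to expose symbolic event names for the raw event codes. For reference, here are two entries from cache.json reproduced in their on-disk layout, taken verbatim from the patch:

```json
[
  {
    "EventCode": "0x3e054",
    "EventName": "PM_LD_MISS_L1",
    "BriefDescription": "Load Missed L1",
    "PublicDescription": ""
  },
  {
    "EventCode": "0x100ee",
    "EventName": "PM_LD_REF_L1",
    "BriefDescription": "All L1 D cache load references counted at finish, gated by reject",
    "PublicDescription": "Load Ref count combined for all units"
  }
]
```

Once built into perf, such names can typically be used directly, e.g. `perf stat -e pm_ld_miss_l1 -- <workload>` on a POWER8 system (exact event-name casing and availability depend on the perf build).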
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/cache.json b/tools/perf/pmu-events/arch/powerpc/power8/cache.json new file mode 100644 index 0000000000..05a17084d9 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/cache.json @@ -0,0 +1,176 @@ +[ + { + "EventCode": "0x4c048", + "EventName": "PM_DATA_FROM_DL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c048", + "EventName": "PM_DATA_FROM_DL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c04c", + "EventName": "PM_DATA_FROM_DL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c042", + "EventName": "PM_DATA_FROM_L2", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x200fe", + "EventName": "PM_DATA_FROM_L2MISS", + "BriefDescription": "Demand LD - L2 Miss (not L2 hit)", + "PublicDescription": "" + }, + { + "EventCode": "0x1c04e", + "EventName": "PM_DATA_FROM_L2MISS_MOD", + "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c040", + "EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x4c040", + "EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c040", + "EventName": "PM_DATA_FROM_L2_MEPF", + 
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c040", + "EventName": "PM_DATA_FROM_L2_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x4c042", + "EventName": "PM_DATA_FROM_L3", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x300fe", + "EventName": "PM_DATA_FROM_L3MISS", + "BriefDescription": "Demand LD - L3 Miss (not L2 hit and not L3 hit)", + "PublicDescription": "" + }, + { + "EventCode": "0x4c04e", + "EventName": "PM_DATA_FROM_L3MISS_MOD", + "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c042", + "EventName": "PM_DATA_FROM_L3_DISP_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c042", + "EventName": "PM_DATA_FROM_L3_MEPF", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c044", + "EventName": "PM_DATA_FROM_L3_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c04c", + "EventName": "PM_DATA_FROM_LL4", + "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x4c04a", + "EventName": "PM_DATA_FROM_OFF_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a demand load", + 
"PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c048", + "EventName": "PM_DATA_FROM_ON_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c046", + "EventName": "PM_DATA_FROM_RL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c04a", + "EventName": "PM_DATA_FROM_RL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3001a", + "EventName": "PM_DATA_TABLEWALK_CYC", + "BriefDescription": "Tablwalk Cycles (could be 1 or 2 active)", + "PublicDescription": "Data Tablewalk Active" + }, + { + "EventCode": "0x4e04e", + "EventName": "PM_DPTEG_FROM_L3MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0xd094", + "EventName": "PM_DSLB_MISS", + "BriefDescription": "Data SLB Miss - Total of all segment sizes", + "PublicDescription": "Data SLB Miss - Total of all segment sizesData SLB misses" + }, + { + "EventCode": "0x1002c", + "EventName": "PM_L1_DCACHE_RELOADED_ALL", + "BriefDescription": "L1 data cache reloaded for demand or prefetch", + "PublicDescription": "" + }, + { + "EventCode": "0x300f6", + "EventName": "PM_L1_DCACHE_RELOAD_VALID", + "BriefDescription": "DL1 reloaded due to Demand Load", + "PublicDescription": "" + }, + { + "EventCode": "0x3e054", + "EventName": "PM_LD_MISS_L1", + "BriefDescription": "Load Missed L1", + "PublicDescription": "" + }, + { + "EventCode": "0x100ee", + "EventName": "PM_LD_REF_L1", + "BriefDescription": "All L1 D cache load references counted at finish, gated by reject", + "PublicDescription": "Load Ref count combined for all units" + }, + { + "EventCode": "0x300f0", + "EventName": "PM_ST_MISS_L1", + "BriefDescription": "Store Missed L1", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json new file mode 100644 index 0000000000..4ef0d01b7f --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json @@ -0,0 +1,14 @@ +[ + { + "EventCode": "0x2000e", + "EventName": "PM_FXU_BUSY", + 
"BriefDescription": "fxu0 busy and fxu1 busy", + "PublicDescription": "" + }, + { + "EventCode": "0x1000e", + "EventName": "PM_FXU_IDLE", + "BriefDescription": "fxu0 idle and fxu1 idle", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json new file mode 100644 index 0000000000..1c902a8263 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json @@ -0,0 +1,470 @@ +[ + { + "EventCode": "0x2505e", + "EventName": "PM_BACK_BR_CMPL", + "BriefDescription": "Branch instruction completed with a target address less than current instruction address", + "PublicDescription": "" + }, + { + "EventCode": "0x10068", + "EventName": "PM_BRU_FIN", + "BriefDescription": "Branch Instruction Finished", + "PublicDescription": "" + }, + { + "EventCode": "0x20036", + "EventName": "PM_BR_2PATH", + "BriefDescription": "two path branch", + "PublicDescription": "" + }, + { + "EventCode": "0x40060", + "EventName": "PM_BR_CMPL", + "BriefDescription": "Branch Instruction completed", + "PublicDescription": "" + }, + { + "EventCode": "0x400f6", + "EventName": "PM_BR_MPRED_CMPL", + "BriefDescription": "Number of Branch Mispredicts", + "PublicDescription": "" + }, + { + "EventCode": "0x200fa", + "EventName": "PM_BR_TAKEN_CMPL", + "BriefDescription": "New event for Branch Taken", + "PublicDescription": "" + }, + { + "EventCode": "0x10018", + "EventName": "PM_IC_DEMAND_CYC", + "BriefDescription": "Cycles when a demand ifetch was pending", + "PublicDescription": "Demand ifetch pending" + }, + { + "EventCode": "0x100f6", + "EventName": "PM_IERAT_RELOAD", + "BriefDescription": "Number of I-ERAT reloads", + "PublicDescription": "IERAT Reloaded (Miss)" + }, + { + "EventCode": "0x4006a", + "EventName": "PM_IERAT_RELOAD_16M", + "BriefDescription": "IERAT Reloaded (Miss) for a 16M page", + "PublicDescription": "" + }, + { + "EventCode": "0x20064", + "EventName": "PM_IERAT_RELOAD_4K", + "BriefDescription": "IERAT Miss (Not implemented as DI on POWER6)", + "PublicDescription": "IERAT Reloaded (Miss) for a 4k page" + }, + { + "EventCode": "0x3006a", + "EventName": "PM_IERAT_RELOAD_64K", + "BriefDescription": "IERAT Reloaded (Miss) for a 64k page", + "PublicDescription": "" + }, + { + "EventCode": "0x14050", + "EventName": "PM_INST_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for an instruction fetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch" + }, + { + "EventCode": "0x2", + "EventName": "PM_INST_CMPL", + "BriefDescription": "Number of PowerPC Instructions that completed", + "PublicDescription": "PPC Instructions Finished (completed)" + }, + { + "EventCode": "0x200f2", + "EventName": "PM_INST_DISP", + "BriefDescription": "PPC Dispatched", + "PublicDescription": "" + }, + { + "EventCode": "0x44048", + "EventName": "PM_INST_FROM_DL2L3_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x34048", + 
"EventName": "PM_INST_FROM_DL2L3_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x3404c", + "EventName": "PM_INST_FROM_DL4", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x4404c", + "EventName": "PM_INST_FROM_DMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x14042", + "EventName": "PM_INST_FROM_L2", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x1404e", + "EventName": "PM_INST_FROM_L2MISS", + "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x34040", + "EventName": "PM_INST_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x44040", + "EventName": "PM_INST_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24040", + "EventName": "PM_INST_FROM_L2_MEPF", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. 
due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x14040", + "EventName": "PM_INST_FROM_L2_NO_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x44042", + "EventName": "PM_INST_FROM_L3", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x300fa", + "EventName": "PM_INST_FROM_L3MISS", + "BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet", + "PublicDescription": "Inst from L3 miss" + }, + { + "EventCode": "0x4404e", + "EventName": "PM_INST_FROM_L3MISS_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to a instruction fetch", + "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x34042", + "EventName": "PM_INST_FROM_L3_DISP_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24042", + "EventName": "PM_INST_FROM_L3_MEPF", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. 
due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x14044", + "EventName": "PM_INST_FROM_L3_NO_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x1404c", + "EventName": "PM_INST_FROM_LL4", + "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24048", + "EventName": "PM_INST_FROM_LMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x2404c", + "EventName": "PM_INST_FROM_MEMORY", + "BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x4404a", + "EventName": "PM_INST_FROM_OFF_CHIP_CACHE", + "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x14048", + "EventName": "PM_INST_FROM_ON_CHIP_CACHE", + "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24046", + "EventName": "PM_INST_FROM_RL2L3_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x1404a", + "EventName": "PM_INST_FROM_RL2L3_SHR", + "BriefDescription": "The processor's Instruction cache was 
reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x2404a", + "EventName": "PM_INST_FROM_RL4", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x3404a", + "EventName": "PM_INST_FROM_RMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24050", + "EventName": "PM_INST_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for an instruction fetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch" + }, + { + "EventCode": "0x24052", + "EventName": "PM_INST_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for an instruction fetch", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x14052", + "EventName": "PM_INST_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for an instruction fetch", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch" + }, + { + "EventCode": "0x1003a", + "EventName": "PM_INST_IMC_MATCH_CMPL", + "BriefDescription": "IMC Match Count ( Not architected in P8)", + "PublicDescription": "" + }, + { + "EventCode": "0x14054", + "EventName": "PM_INST_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for an instruction fetch", + "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor an instruction fetch" + }, + { + "EventCode": "0x44052", + "EventName": "PM_INST_PUMP_MPRED", + "BriefDescription": "Pump misprediction. 
Counts across all types of pumps for an instruction fetch", + "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch" + }, + { + "EventCode": "0x34050", + "EventName": "PM_INST_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for an instruction fetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch" + }, + { + "EventCode": "0x34052", + "EventName": "PM_INST_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for an instruction fetch", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x44050", + "EventName": "PM_INST_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for an instruction fetch", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch" + }, + { + "EventCode": "0x45048", + "EventName": "PM_IPTEG_FROM_DL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x35048", + "EventName": "PM_IPTEG_FROM_DL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3504c", + "EventName": "PM_IPTEG_FROM_DL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4504c", + "EventName": "PM_IPTEG_FROM_DMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x15042", + "EventName": "PM_IPTEG_FROM_L2", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1504e", + "EventName": "PM_IPTEG_FROM_L2MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x25040", + "EventName": "PM_IPTEG_FROM_L2_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. 
due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x15040", + "EventName": "PM_IPTEG_FROM_L2_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x45042", + "EventName": "PM_IPTEG_FROM_L3", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4504e", + "EventName": "PM_IPTEG_FROM_L3MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x35042", + "EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x25042", + "EventName": "PM_IPTEG_FROM_L3_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x15044", + "EventName": "PM_IPTEG_FROM_L3_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1504c", + "EventName": "PM_IPTEG_FROM_LL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x25048", + "EventName": "PM_IPTEG_FROM_LMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2504c", + "EventName": "PM_IPTEG_FROM_MEMORY", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4504a", + "EventName": "PM_IPTEG_FROM_OFF_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x15048", + "EventName": "PM_IPTEG_FROM_ON_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x25046", + "EventName": "PM_IPTEG_FROM_RL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1504a", + "EventName": "PM_IPTEG_FROM_RL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2504a", + "EventName": "PM_IPTEG_FROM_RL4", + 
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3504a", + "EventName": "PM_IPTEG_FROM_RMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0xd096", + "EventName": "PM_ISLB_MISS", + "BriefDescription": "I SLB Miss", + "PublicDescription": "" + }, + { + "EventCode": "0x400fc", + "EventName": "PM_ITLB_MISS", + "BriefDescription": "ITLB Reloaded (always zero on POWER6)", + "PublicDescription": "" + }, + { + "EventCode": "0x200fd", + "EventName": "PM_L1_ICACHE_MISS", + "BriefDescription": "Demand iCache Miss", + "PublicDescription": "" + }, + { + "EventCode": "0x40012", + "EventName": "PM_L1_ICACHE_RELOADED_ALL", + "BriefDescription": "Counts all Icache reloads includes demand, prefetchm prefetch turned into demand and demand turned into prefetch", + "PublicDescription": "" + }, + { + "EventCode": "0x30068", + "EventName": "PM_L1_ICACHE_RELOADED_PREF", + "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)", + "PublicDescription": "" + }, + { + "EventCode": "0x300f4", + "EventName": "PM_THRD_CONC_RUN_INST", + "BriefDescription": "PPC Instructions Finished when both threads in run_cycles", + "PublicDescription": "Concurrent Run Instructions" + }, + { + "EventCode": "0x30060", + "EventName": "PM_TM_TRANS_RUN_INST", + "BriefDescription": "Instructions completed in transactional state", + "PublicDescription": "" + }, + { + "EventCode": "0x4e014", + "EventName": "PM_TM_TX_PASS_RUN_INST", + "BriefDescription": "run instructions spent in successful transactions", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/marked.json b/tools/perf/pmu-events/arch/powerpc/power8/marked.json new file mode 100644 index 0000000000..6de61a797b --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/marked.json @@ -0,0 +1,794 @@ +[ + { + "EventCode": "0x3515e", + "EventName": "PM_MRK_BACK_BR_CMPL", + "BriefDescription": "Marked branch instruction completed with a target address less than current instruction address", + "PublicDescription": "" + }, + { + "EventCode": "0x2013a", + "EventName": "PM_MRK_BRU_FIN", + "BriefDescription": "bru marked instr finish", + "PublicDescription": "" + }, + { + "EventCode": "0x1016e", + "EventName": "PM_MRK_BR_CMPL", + "BriefDescription": "Branch Instruction completed", + "PublicDescription": "" + }, + { + "EventCode": "0x301e4", + "EventName": "PM_MRK_BR_MPRED_CMPL", + "BriefDescription": "Marked Branch Mispredicted", + "PublicDescription": "" + }, + { + "EventCode": "0x101e2", + "EventName": "PM_MRK_BR_TAKEN_CMPL", + "BriefDescription": "Marked Branch Taken completed", + "PublicDescription": "" + }, + { + "EventCode": "0x4d148", + "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d128", + "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC", + "BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load", + "PublicDescription": "" 
+ }, + { + "EventCode": "0x3d148", + "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c128", + "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC", + "BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d14c", + "EventName": "PM_MRK_DATA_FROM_DL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c12c", + "EventName": "PM_MRK_DATA_FROM_DL4_CYC", + "BriefDescription": "Duration in cycles to reload from another chip's L4 on a different Node or Group (Distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d14c", + "EventName": "PM_MRK_DATA_FROM_DMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d12c", + "EventName": "PM_MRK_DATA_FROM_DMEM_CYC", + "BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d142", + "EventName": "PM_MRK_DATA_FROM_L2", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d14e", + "EventName": "PM_MRK_DATA_FROM_L2MISS", + "BriefDescription": "Data cache reload L2 miss", + "PublicDescription": "" + }, + { + "EventCode": "0x4c12e", + "EventName": "PM_MRK_DATA_FROM_L2MISS_CYC", + "BriefDescription": "Duration in cycles to reload from a location other than the local core's L2 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c122", + "EventName": "PM_MRK_DATA_FROM_L2_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d140", + "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c120", + "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L2 with load hit store conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d140", + "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d120", + "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L2 with dispatch conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d140", + "EventName": "PM_MRK_DATA_FROM_L2_MEPF", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf 
state. due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d120", + "EventName": "PM_MRK_DATA_FROM_L2_MEPF_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d140", + "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c120", + "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L2 without conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d142", + "EventName": "PM_MRK_DATA_FROM_L3", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x201e4", + "EventName": "PM_MRK_DATA_FROM_L3MISS", + "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d12e", + "EventName": "PM_MRK_DATA_FROM_L3MISS_CYC", + "BriefDescription": "Duration in cycles to reload from a location other than the local core's L3 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d122", + "EventName": "PM_MRK_DATA_FROM_L3_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L3 due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d142", + "EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c122", + "EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L3 with dispatch conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d142", + "EventName": "PM_MRK_DATA_FROM_L3_MEPF", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d122", + "EventName": "PM_MRK_DATA_FROM_L3_MEPF_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L3 without dispatch conflicts hit on Mepf state. 
due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d144", + "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c124", + "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC", + "BriefDescription": "Duration in cycles to reload from local core's L3 without conflict due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d14c", + "EventName": "PM_MRK_DATA_FROM_LL4", + "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c12c", + "EventName": "PM_MRK_DATA_FROM_LL4_CYC", + "BriefDescription": "Duration in cycles to reload from the local chip's L4 cache due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d148", + "EventName": "PM_MRK_DATA_FROM_LMEM", + "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d128", + "EventName": "PM_MRK_DATA_FROM_LMEM_CYC", + "BriefDescription": "Duration in cycles to reload from the local chip's Memory due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d14c", + "EventName": "PM_MRK_DATA_FROM_MEMORY", + "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d12c", + "EventName": "PM_MRK_DATA_FROM_MEMORY_CYC", + "BriefDescription": "Duration in cycles to reload from a memory location including L4 from local remote or distant due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d14a", + "EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d12a", + "EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC", + "BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d148", + "EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c128", + "EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC", + "BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d146", + "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d126", + "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC", + "BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load", + "PublicDescription": 
"" + }, + { + "EventCode": "0x1d14a", + "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c12a", + "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC", + "BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d14a", + "EventName": "PM_MRK_DATA_FROM_RL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d12a", + "EventName": "PM_MRK_DATA_FROM_RL4_CYC", + "BriefDescription": "Duration in cycles to reload from another chip's L4 on the same Node or Group ( Remote) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d14a", + "EventName": "PM_MRK_DATA_FROM_RMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c12a", + "EventName": "PM_MRK_DATA_FROM_RMEM_CYC", + "BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group ( Remote) due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x40118", + "EventName": "PM_MRK_DCACHE_RELOAD_INTV", + "BriefDescription": "Combined Intervention event", + "PublicDescription": "" + }, + { + "EventCode": "0x301e6", + "EventName": "PM_MRK_DERAT_MISS", + "BriefDescription": "Erat Miss (TLB Access) All page sizes", + "PublicDescription": "" + }, + { + "EventCode": "0x4d154", + "EventName": "PM_MRK_DERAT_MISS_16G", + "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G", + "PublicDescription": "" + }, + { + "EventCode": "0x3d154", + "EventName": "PM_MRK_DERAT_MISS_16M", + "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M", + "PublicDescription": "" + }, + { + "EventCode": "0x1d156", + "EventName": "PM_MRK_DERAT_MISS_4K", + "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 4K", + "PublicDescription": "" + }, + { + "EventCode": "0x2d154", + "EventName": "PM_MRK_DERAT_MISS_64K", + "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 64K", + "PublicDescription": "" + }, + { + "EventCode": "0x20132", + "EventName": "PM_MRK_DFU_FIN", + "BriefDescription": "Decimal Unit marked Instruction Finish", + "PublicDescription": "" + }, + { + "EventCode": "0x4f148", + "EventName": "PM_MRK_DPTEG_FROM_DL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f148", + "EventName": "PM_MRK_DPTEG_FROM_DL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f14c", + "EventName": "PM_MRK_DPTEG_FROM_DL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different 
Node or Group (Distant) due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f14c", + "EventName": "PM_MRK_DPTEG_FROM_DMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f142", + "EventName": "PM_MRK_DPTEG_FROM_L2", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f14e", + "EventName": "PM_MRK_DPTEG_FROM_L2MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f140", + "EventName": "PM_MRK_DPTEG_FROM_L2_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f140", + "EventName": "PM_MRK_DPTEG_FROM_L2_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f142", + "EventName": "PM_MRK_DPTEG_FROM_L3", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f14e", + "EventName": "PM_MRK_DPTEG_FROM_L3MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f142", + "EventName": "PM_MRK_DPTEG_FROM_L3_DISP_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f142", + "EventName": "PM_MRK_DPTEG_FROM_L3_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. 
due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f144", + "EventName": "PM_MRK_DPTEG_FROM_L3_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f14c", + "EventName": "PM_MRK_DPTEG_FROM_LL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f148", + "EventName": "PM_MRK_DPTEG_FROM_LMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f14c", + "EventName": "PM_MRK_DPTEG_FROM_MEMORY", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f14a", + "EventName": "PM_MRK_DPTEG_FROM_OFF_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f148", + "EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f146", + "EventName": "PM_MRK_DPTEG_FROM_RL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1f14a", + "EventName": "PM_MRK_DPTEG_FROM_RL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f14a", + "EventName": "PM_MRK_DPTEG_FROM_RL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f14a", + "EventName": "PM_MRK_DPTEG_FROM_RMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x401e4", + "EventName": "PM_MRK_DTLB_MISS", + "BriefDescription": "Marked dtlb miss", + "PublicDescription": "" + }, + { + "EventCode": "0x1d158", + "EventName": "PM_MRK_DTLB_MISS_16G", + "BriefDescription": "Marked Data TLB Miss page size 16G", + "PublicDescription": "" + }, + { + "EventCode": "0x4d156", + "EventName": "PM_MRK_DTLB_MISS_16M", + "BriefDescription": "Marked Data TLB Miss page size 16M", + "PublicDescription": "" + }, + { + "EventCode": "0x2d156", + "EventName": "PM_MRK_DTLB_MISS_4K", + "BriefDescription": "Marked Data TLB Miss page size 4k", + "PublicDescription": "" + }, + { + "EventCode": "0x3d156", + "EventName": "PM_MRK_DTLB_MISS_64K", + "BriefDescription": "Marked Data TLB Miss page size 64K", + 
"PublicDescription": "" + }, + { + "EventCode": "0x40154", + "EventName": "PM_MRK_FAB_RSP_BKILL", + "BriefDescription": "Marked store had to do a bkill", + "PublicDescription": "" + }, + { + "EventCode": "0x2f150", + "EventName": "PM_MRK_FAB_RSP_BKILL_CYC", + "BriefDescription": "cycles L2 RC took for a bkill", + "PublicDescription": "" + }, + { + "EventCode": "0x3015e", + "EventName": "PM_MRK_FAB_RSP_CLAIM_RTY", + "BriefDescription": "Sampled store did a rwitm and got a rty", + "PublicDescription": "" + }, + { + "EventCode": "0x30154", + "EventName": "PM_MRK_FAB_RSP_DCLAIM", + "BriefDescription": "Marked store had to do a dclaim", + "PublicDescription": "" + }, + { + "EventCode": "0x2f152", + "EventName": "PM_MRK_FAB_RSP_DCLAIM_CYC", + "BriefDescription": "cycles L2 RC took for a dclaim", + "PublicDescription": "" + }, + { + "EventCode": "0x4015e", + "EventName": "PM_MRK_FAB_RSP_RD_RTY", + "BriefDescription": "Sampled L2 reads retry count", + "PublicDescription": "" + }, + { + "EventCode": "0x1015e", + "EventName": "PM_MRK_FAB_RSP_RD_T_INTV", + "BriefDescription": "Sampled Read got a T intervention", + "PublicDescription": "" + }, + { + "EventCode": "0x4f150", + "EventName": "PM_MRK_FAB_RSP_RWITM_CYC", + "BriefDescription": "cycles L2 RC took for a rwitm", + "PublicDescription": "" + }, + { + "EventCode": "0x2015e", + "EventName": "PM_MRK_FAB_RSP_RWITM_RTY", + "BriefDescription": "Sampled store did a rwitm and got a rty", + "PublicDescription": "" + }, + { + "EventCode": "0x20134", + "EventName": "PM_MRK_FXU_FIN", + "BriefDescription": "fxu marked instr finish", + "PublicDescription": "" + }, + { + "EventCode": "0x401e0", + "EventName": "PM_MRK_INST_CMPL", + "BriefDescription": "marked instruction completed", + "PublicDescription": "" + }, + { + "EventCode": "0x20130", + "EventName": "PM_MRK_INST_DECODED", + "BriefDescription": "marked instruction decoded", + "PublicDescription": "marked instruction decoded. Name from ISU?" 
+ }, + { + "EventCode": "0x101e0", + "EventName": "PM_MRK_INST_DISP", + "BriefDescription": "The thread has dispatched a randomly sampled marked instruction", + "PublicDescription": "Marked Instruction dispatched" + }, + { + "EventCode": "0x30130", + "EventName": "PM_MRK_INST_FIN", + "BriefDescription": "marked instruction finished", + "PublicDescription": "marked instr finish any unit" + }, + { + "EventCode": "0x401e6", + "EventName": "PM_MRK_INST_FROM_L3MISS", + "BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet", + "PublicDescription": "n/a" + }, + { + "EventCode": "0x10132", + "EventName": "PM_MRK_INST_ISSUED", + "BriefDescription": "Marked instruction issued", + "PublicDescription": "" + }, + { + "EventCode": "0x40134", + "EventName": "PM_MRK_INST_TIMEO", + "BriefDescription": "marked Instruction finish timeout (instruction lost)", + "PublicDescription": "" + }, + { + "EventCode": "0x101e4", + "EventName": "PM_MRK_L1_ICACHE_MISS", + "BriefDescription": "sampled Instruction suffered an icache Miss", + "PublicDescription": "Marked L1 Icache Miss" + }, + { + "EventCode": "0x101ea", + "EventName": "PM_MRK_L1_RELOAD_VALID", + "BriefDescription": "Marked demand reload", + "PublicDescription": "" + }, + { + "EventCode": "0x20114", + "EventName": "PM_MRK_L2_RC_DISP", + "BriefDescription": "Marked Instruction RC dispatched in L2", + "PublicDescription": "" + }, + { + "EventCode": "0x3012a", + "EventName": "PM_MRK_L2_RC_DONE", + "BriefDescription": "Marked RC done", + "PublicDescription": "" + }, + { + "EventCode": "0x40116", + "EventName": "PM_MRK_LARX_FIN", + "BriefDescription": "Larx finished", + "PublicDescription": "" + }, + { + "EventCode": "0x1013e", + "EventName": "PM_MRK_LD_MISS_EXPOSED_CYC", + "BriefDescription": "Marked Load exposed Miss cycles", + "PublicDescription": "Marked Load exposed Miss (use edge detect to count #)" + }, + { + "EventCode": "0x201e2", + "EventName": "PM_MRK_LD_MISS_L1", + "BriefDescription": "Marked DL1 Demand Miss counted at exec time", + "PublicDescription": "" + }, + { + "EventCode": "0x4013e", + "EventName": "PM_MRK_LD_MISS_L1_CYC", + "BriefDescription": "Marked ld latency", + "PublicDescription": "" + }, + { + "EventCode": "0x40132", + "EventName": "PM_MRK_LSU_FIN", + "BriefDescription": "lsu marked instr finish", + "PublicDescription": "" + }, + { + "EventCode": "0x20112", + "EventName": "PM_MRK_NTF_FIN", + "BriefDescription": "Marked next to finish instruction finished", + "PublicDescription": "" + }, + { + "EventCode": "0x1d15e", + "EventName": "PM_MRK_RUN_CYC", + "BriefDescription": "Marked run cycles", + "PublicDescription": "" + }, + { + "EventCode": "0x3013e", + "EventName": "PM_MRK_STALL_CMPLU_CYC", + "BriefDescription": "Marked Group completion Stall", + "PublicDescription": "Marked Group Completion Stall cycles (use edge detect to count #)" + }, + { + "EventCode": "0x3e158", + "EventName": "PM_MRK_STCX_FAIL", + "BriefDescription": "marked stcx failed", + "PublicDescription": "" + }, + { + "EventCode": "0x10134", + "EventName": "PM_MRK_ST_CMPL", + "BriefDescription": "marked store completed and sent to nest", + "PublicDescription": "Marked store completed" + }, + { + "EventCode": "0x30134", + "EventName": "PM_MRK_ST_CMPL_INT", + "BriefDescription": "marked store finished with intervention", + "PublicDescription": "marked store complete (data home) with intervention" + }, + { + "EventCode": "0x3f150", + "EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC", + "BriefDescription": "cycles to drain st from core to 
L2", + "PublicDescription": "" + }, + { + "EventCode": "0x3012c", + "EventName": "PM_MRK_ST_FWD", + "BriefDescription": "Marked st forwards", + "PublicDescription": "" + }, + { + "EventCode": "0x1f150", + "EventName": "PM_MRK_ST_L2DISP_TO_CMPL_CYC", + "BriefDescription": "cycles from L2 rc disp to l2 rc completion", + "PublicDescription": "" + }, + { + "EventCode": "0x20138", + "EventName": "PM_MRK_ST_NEST", + "BriefDescription": "Marked store sent to nest", + "PublicDescription": "" + }, + { + "EventCode": "0x30132", + "EventName": "PM_MRK_VSU_FIN", + "BriefDescription": "VSU marked instr finish", + "PublicDescription": "vsu (fpu) marked instr finish" + }, + { + "EventCode": "0x3d15e", + "EventName": "PM_MULT_MRK", + "BriefDescription": "mult marked instr", + "PublicDescription": "" + }, + { + "EventCode": "0x15152", + "EventName": "PM_SYNC_MRK_BR_LINK", + "BriefDescription": "Marked Branch and link branch that can cause a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x1515c", + "EventName": "PM_SYNC_MRK_BR_MPRED", + "BriefDescription": "Marked Branch mispredict that can cause a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x15156", + "EventName": "PM_SYNC_MRK_FX_DIVIDE", + "BriefDescription": "Marked fixed point divide that can cause a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x15158", + "EventName": "PM_SYNC_MRK_L2HIT", + "BriefDescription": "Marked L2 Hits that can throw a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x1515a", + "EventName": "PM_SYNC_MRK_L2MISS", + "BriefDescription": "Marked L2 Miss that can throw a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x15154", + "EventName": "PM_SYNC_MRK_L3MISS", + "BriefDescription": "Marked L3 misses that can throw a synchronous interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x15150", + "EventName": "PM_SYNC_MRK_PROBE_NOP", + "BriefDescription": "Marked probeNops which can cause synchronous interrupts", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/memory.json b/tools/perf/pmu-events/arch/powerpc/power8/memory.json new file mode 100644 index 0000000000..2ba33420e2 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/memory.json @@ -0,0 +1,212 @@ +[ + { + "EventCode": "0x10050", + "EventName": "PM_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)" + }, + { + "EventCode": "0x1c050", + "EventName": "PM_DATA_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for a demand load", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load" + }, + { + "EventCode": "0x4c04c", + "EventName": "PM_DATA_FROM_DMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] 
is 1" + }, + { + "EventCode": "0x2c048", + "EventName": "PM_DATA_FROM_LMEM", + "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c04c", + "EventName": "PM_DATA_FROM_MEMORY", + "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c04a", + "EventName": "PM_DATA_FROM_RL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c04a", + "EventName": "PM_DATA_FROM_RMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a demand load", + "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c050", + "EventName": "PM_DATA_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for a demand load", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load" + }, + { + "EventCode": "0x2c052", + "EventName": "PM_DATA_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for a demand load", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x1c052", + "EventName": "PM_DATA_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for a demand load", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load" + }, + { + "EventCode": "0x1c054", + "EventName": "PM_DATA_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for a demand load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c052", + "EventName": "PM_DATA_PUMP_MPRED", + "BriefDescription": "Pump misprediction. 
Counts across all types of pumps for a demand load", + "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load" + }, + { + "EventCode": "0x3c050", + "EventName": "PM_DATA_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for a demand load", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load" + }, + { + "EventCode": "0x3c052", + "EventName": "PM_DATA_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x4c050", + "EventName": "PM_DATA_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for a demand load", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load" + }, + { + "EventCode": "0x3e04c", + "EventName": "PM_DPTEG_FROM_DL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4e04c", + "EventName": "PM_DPTEG_FROM_DMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e04a", + "EventName": "PM_DPTEG_FROM_RMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x20050", + "EventName": "PM_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "" + }, + { + "EventCode": "0x20052", + "EventName": "PM_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x10052", + "EventName": "PM_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x18082", + "EventName": "PM_L3_CO_MEPF", + 
"BriefDescription": "L3 CO of line in Mep state ( includes casthrough", + "PublicDescription": "" + }, + { + "EventCode": "0x4c058", + "EventName": "PM_MEM_CO", + "BriefDescription": "Memory castouts from this lpar", + "PublicDescription": "" + }, + { + "EventCode": "0x10058", + "EventName": "PM_MEM_LOC_THRESH_IFU", + "BriefDescription": "Local Memory above threshold for IFU speculation control", + "PublicDescription": "" + }, + { + "EventCode": "0x40056", + "EventName": "PM_MEM_LOC_THRESH_LSU_HIGH", + "BriefDescription": "Local memory above threshold for LSU medium", + "PublicDescription": "" + }, + { + "EventCode": "0x1c05e", + "EventName": "PM_MEM_LOC_THRESH_LSU_MED", + "BriefDescription": "Local memory above theshold for data prefetch", + "PublicDescription": "" + }, + { + "EventCode": "0x2c058", + "EventName": "PM_MEM_PREF", + "BriefDescription": "Memory prefetch for this lpar. Includes L4", + "PublicDescription": "" + }, + { + "EventCode": "0x10056", + "EventName": "PM_MEM_READ", + "BriefDescription": "Reads from Memory from this lpar (includes data/inst/xlate/l1prefetch/inst prefetch). Includes L4", + "PublicDescription": "" + }, + { + "EventCode": "0x3c05e", + "EventName": "PM_MEM_RWITM", + "BriefDescription": "Memory rwitm for this lpar", + "PublicDescription": "" + }, + { + "EventCode": "0x3006e", + "EventName": "PM_NEST_REF_CLK", + "BriefDescription": "Multiply by 4 to obtain the number of PB cycles", + "PublicDescription": "Nest reference clocks" + }, + { + "EventCode": "0x10054", + "EventName": "PM_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x40052", + "EventName": "PM_PUMP_MPRED", + "BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x30050", + "EventName": "PM_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x30052", + "EventName": "PM_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. 
Counts for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x40050", + "EventName": "PM_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json new file mode 100644 index 0000000000..4e25525b7d --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json @@ -0,0 +1,2245 @@ +[ + { + "BriefDescription": "% of finished branches that were treated as BC+8", + "MetricExpr": "PM_BR_BC_8_CONV / PM_BRU_FIN * 100", + "MetricGroup": "branch_prediction", + "MetricName": "bc_8_branch_ratio_percent" + }, + { + "BriefDescription": "% of finished branches that were pairable but not treated as BC+8", + "MetricExpr": "PM_BR_BC_8 / PM_BRU_FIN * 100", + "MetricGroup": "branch_prediction", + "MetricName": "bc_8_not_converted_branch_ratio_percent" + }, + { + "BriefDescription": "Percent of mispredicted branches out of all predicted (correctly and incorrectly) branches that completed", + "MetricExpr": "PM_BR_MPRED_CMPL / (PM_BR_PRED_BR0 + PM_BR_PRED_BR1) * 100", + "MetricGroup": "branch_prediction", + "MetricName": "br_misprediction_percent" + }, + { + "BriefDescription": "% of Branch miss predictions per instruction", + "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL * 100", + "MetricGroup": "branch_prediction", + "MetricName": "branch_mispredict_rate_percent" + }, + { + "BriefDescription": "Count cache branch misprediction per instruction", + "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100", + "MetricGroup": "branch_prediction", + "MetricName": "ccache_mispredict_rate_percent" + }, + { + "BriefDescription": "Percent of count catch mispredictions out of all completed branches that required count cache predictionn", + "MetricExpr": "PM_BR_MPRED_CCACHE / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1) * 100", + "MetricGroup": "branch_prediction", + "MetricName": "ccache_misprediction_percent" + }, + { + "BriefDescription": "CR MisPredictions per Instruction", + "MetricExpr": "PM_BR_MPRED_CR / PM_RUN_INST_CMPL * 100", + "MetricGroup": "branch_prediction", + "MetricName": "cr_mispredict_rate_percent" + }, + { + "BriefDescription": "Link stack branch misprediction", + "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / PM_RUN_INST_CMPL * 100", + "MetricGroup": "branch_prediction", + "MetricName": "lstack_mispredict_rate_percent" + }, + { + "BriefDescription": "Percent of link stack mispredictions out of all completed branches that required link stack prediction", + "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / (PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100", + "MetricGroup": "branch_prediction", + "MetricName": "lstack_misprediction_percent" + }, + { + "BriefDescription": "TA MisPredictions per Instruction", + "MetricExpr": "PM_BR_MPRED_TA / 
PM_RUN_INST_CMPL * 100", + "MetricGroup": "branch_prediction", + "MetricName": "ta_mispredict_rate_percent" + }, + { + "BriefDescription": "Percent of target address mispredictions out of all completed branches that required address prediction", + "MetricExpr": "PM_BR_MPRED_TA / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1 + PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100", + "MetricGroup": "branch_prediction", + "MetricName": "ta_misprediction_percent" + }, + { + "BriefDescription": "Percent of branches completed that were taken", + "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BR_CMPL", + "MetricGroup": "branch_prediction", + "MetricName": "taken_branches_percent" + }, + { + "BriefDescription": "Percent of chip+group+sys pumps that were incorrectly predicted", + "MetricExpr": "PM_PUMP_MPRED * 100 / (PM_PUMP_CPRED + PM_PUMP_MPRED)", + "MetricGroup": "bus_stats", + "MetricName": "any_pump_mpred_percent" + }, + { + "BriefDescription": "Percent of chip pumps that were correctly predicted as chip pumps the first time", + "MetricExpr": "PM_CHIP_PUMP_CPRED * 100 / PM_L2_CHIP_PUMP", + "MetricGroup": "bus_stats", + "MetricName": "chip_pump_cpred_percent" + }, + { + "BriefDescription": "Percent of group pumps that were correctly predicted as group pumps the first time", + "MetricExpr": "PM_GRP_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP", + "MetricGroup": "bus_stats", + "MetricName": "group_pump_cpred_percent" + }, + { + "BriefDescription": "Percent of system pumps that were correctly predicted as group pumps the first time", + "MetricExpr": "PM_SYS_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP", + "MetricGroup": "bus_stats", + "MetricName": "sys_pump_cpred_percent" + }, + { + "BriefDescription": "Cycles stalled due to CRU or BRU operations", + "MetricExpr": "PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "bru_cru_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled due to ISU Branch Operations", + "MetricExpr": "PM_CMPLU_STALL_BRU / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "bru_stall_cpi" + }, + { + "BriefDescription": "Cycles in which a Group Completed", + "MetricExpr": "PM_GRP_CMPL / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "completion_cpi" + }, + { + "BriefDescription": "Cycles stalled by CO queue full", + "MetricExpr": "PM_CMPLU_STALL_COQ_FULL / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "coq_full_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled due to CRU Operations", + "MetricExpr": "(PM_CMPLU_STALL_BRU_CRU - PM_CMPLU_STALL_BRU) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "cru_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by flushes", + "MetricExpr": "PM_CMPLU_STALL_FLUSH / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "flush_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by FXU Multi-Cycle Instructions", + "MetricExpr": "PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "fxu_multi_cyc_cpi" + }, + { + "BriefDescription": "Cycles stalled by FXU", + "MetricExpr": "PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "fxu_stall_cpi" + }, + { + "BriefDescription": "Other cycles stalled by FXU", + "MetricExpr": "(PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "fxu_stall_other_cpi" + }, + { + "BriefDescription": "Cycles GCT empty 
due to Branch Mispredicts", + "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_br_mpred_cpi" + }, + { + "BriefDescription": "Cycles GCT empty due to Branch Mispredicts and Icache Misses", + "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_br_mpred_ic_miss_cpi" + }, + { + "BriefDescription": "GCT empty cycles", + "MetricExpr": "PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_cpi" + }, + { + "BriefDescription": "Cycles GCT empty where dispatch was held", + "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_disp_held_cpi" + }, + { + "BriefDescription": "Cycles GCT empty where dispatch was held due to issue queue", + "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_disp_held_issq_cpi" + }, + { + "BriefDescription": "Cycles GCT empty where dispatch was held due to maps", + "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_disp_held_map_cpi" + }, + { + "BriefDescription": "Cycles GCT empty where dispatch was held due to syncs and other effects", + "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_disp_held_other_cpi" + }, + { + "BriefDescription": "Cycles GCT empty where dispatch was held due to SRQ", + "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_disp_held_srq_cpi" + }, + { + "BriefDescription": "Cycles stalled by GCT empty due to Icache misses", + "MetricExpr": "PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_ic_miss_cpi" + }, + { + "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve in the local L2 or L3", + "MetricExpr": "(PM_GCT_NOSLOT_IC_MISS - PM_GCT_NOSLOT_IC_L3MISS) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_ic_miss_l2l3_cpi" + }, + { + "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve off-chip", + "MetricExpr": "PM_GCT_NOSLOT_IC_L3MISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_ic_miss_l3miss_cpi" + }, + { + "BriefDescription": "Other GCT empty cycles", + "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL) - ((PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL))", + "MetricGroup": "cpi_breakdown", + "MetricName": "gct_empty_other_cpi" + }, + { + "BriefDescription": "Cycles stalled by heavyweight syncs", + "MetricExpr": "PM_CMPLU_STALL_HWSYNC / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "hwsync_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by LSU", + "MetricExpr": "PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_cpi" + }, + { + 
"BriefDescription": "Cycles stalled by D-Cache Misses", + "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in distant interventions and memory", + "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_REMOTE) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_distant_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote or distant caches", + "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_l21l31_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was a conflict", + "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_l2l3_conflict_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3", + "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_l2l3_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was no conflict", + "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_l2l3_noconflict_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in other core's caches or memory", + "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_l3miss_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in local memory or local L4", + "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_lmem_cpi" + }, + { + "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote interventions and memory", + "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_dcache_miss_remote_cpi" + }, + { + "BriefDescription": "Cycles stalled by ERAT Translation rejects", + "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_erat_miss_cpi" + }, + { + "BriefDescription": "Cycles stalled by LSU load finishes", + "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_ld_fin_cpi" + }, + { + "BriefDescription": "Cycles stalled by LHS rejects", + "MetricExpr": "PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_lhs_cpi" + }, + { + "BriefDescription": "Cycles stalled by LMQ Full rejects", + "MetricExpr": "PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_lmq_full_cpi" + }, + { + "BriefDescription": "Cycles stalled by Other LSU Operations", + "MetricExpr": "(PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - 
(PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_other_cpi" + }, + { + "BriefDescription": "Cycles stalled by LSU Rejects", + "MetricExpr": "PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_reject_cpi" + }, + { + "BriefDescription": "Cycles stalled by Other LSU Rejects", + "MetricExpr": "(PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_reject_other_cpi" + }, + { + "BriefDescription": "Cycles stalled by LSU store forwarding", + "MetricExpr": "PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_st_fwd_cpi" + }, + { + "BriefDescription": "Cycles stalled by LSU Stores", + "MetricExpr": "PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lsu_stall_store_cpi" + }, + { + "BriefDescription": "Cycles stalled by lightweight syncs", + "MetricExpr": "PM_CMPLU_STALL_LWSYNC / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "lwsync_stall_cpi" + }, + { + "MetricExpr": "PM_CMPLU_STALL_MEM_ECC_DELAY / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "mem_ecc_delay_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by nops (nothing next to finish)", + "MetricExpr": "PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "no_ntf_stall_cpi" + }, + { + "MetricExpr": "PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "ntcg_all_fin_cpi" + }, + { + "MetricExpr": "PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "ntcg_flush_cpi" + }, + { + "BriefDescription": "Other thread block stall cycles", + "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_LWSYNC - PM_CMPLU_STALL_HWSYNC - PM_CMPLU_STALL_MEM_ECC_DELAY - PM_CMPLU_STALL_FLUSH - PM_CMPLU_STALL_COQ_FULL) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "other_block_stall_cpi" + }, + { + "BriefDescription": "Cycles unaccounted for", + "MetricExpr": "(PM_RUN_CYC / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL) - (PM_GRP_CMPL / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "other_cpi" + }, + { + "BriefDescription": "Stall cycles unaccounted for", + "MetricExpr": "(PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "other_stall_cpi" + }, + { + "BriefDescription": "Run cycles per run instruction", + "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "run_cpi" + }, + { + "BriefDescription": "Completion Stall Cycles", + "MetricExpr": "PM_CMPLU_STALL / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "stall_cpi" + }, + { + "BriefDescription": "Cycles a thread was 
blocked", + "MetricExpr": "PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "thread_block_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by VSU", + "MetricExpr": "PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_cpi" + }, + { + "BriefDescription": "Cycles stalled by other VSU Operations", + "MetricExpr": "(PM_CMPLU_STALL_VSU - PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_SCALAR) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_other_cpi" + }, + { + "BriefDescription": "Cycles stalled by VSU Scalar Operations", + "MetricExpr": "PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_scalar_cpi" + }, + { + "BriefDescription": "Cycles stalled by VSU Scalar Long Operations", + "MetricExpr": "PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_scalar_long_cpi" + }, + { + "BriefDescription": "Cycles stalled by Other VSU Scalar Operations", + "MetricExpr": "(PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL)", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_scalar_other_cpi" + }, + { + "BriefDescription": "Cycles stalled by VSU Vector Operations", + "MetricExpr": "PM_CMPLU_STALL_VECTOR / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_vector_cpi" + }, + { + "BriefDescription": "Cycles stalled by VSU Vector Long Operations", + "MetricExpr": "PM_CMPLU_STALL_VECTOR_LONG / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_vector_long_cpi" + }, + { + "BriefDescription": "Cycles stalled by other VSU Vector Operations", + "MetricExpr": "(PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_VECTOR_LONG) / PM_RUN_INST_CMPL", + "MetricGroup": "cpi_breakdown", + "MetricName": "vsu_stall_vector_other_cpi" + }, + { + "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst", + "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst", + "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst", + "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_dl4_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst", + "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_dmem_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst", + "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l21_mod_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst", + "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l21_shr_rate_percent" + }, + { + "BriefDescription": "Percentage of L2 load hits per instruction 
where the L2 experienced a Load-Hit-Store conflict", + "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l2_lhs_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from L2 per Inst", + "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l2_miss_rate_percent" + }, + { + "BriefDescription": "Percentage of L2 load hits per instruction where the L2 did not experience a conflict", + "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l2_no_conflict_rate_percent" + }, + { + "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced some conflict other than Load-Hit-Store", + "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l2_other_conflict_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from L2 per Inst", + "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l2_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst", + "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l31_mod_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3 S tate, other core per Inst", + "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l31_shr_rate_percent" + }, + { + "BriefDescription": "Percentage of L3 load hits per instruction where the load collided with a pending prefetch", + "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l3_conflict_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from L3 per Inst", + "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l3_miss_rate_percent" + }, + { + "BriefDescription": "Percentage of L3 load hits per instruction where the L3 did not experience a conflict", + "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l3_no_conflict_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from L3 per Inst", + "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_l3_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Local L4 per Inst", + "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_ll4_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Local Memory per Inst", + "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_lmem_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst", + "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / 
PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst", + "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst", + "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_rl4_rate_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst", + "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "dl1_reload_from_rmem_rate_percent" + }, + { + "BriefDescription": "Percentage of L1 demand load misses per run instruction", + "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_inst", + "MetricName": "l1_ld_miss_rate_percent" + }, + { + "BriefDescription": "% of DL1 misses that result in a cache reload", + "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_miss_reloads_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)", + "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_dl2l3_mod_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)", + "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_dl2l3_shr_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Distant L4", + "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_dl4_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory", + "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_dmem_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L2, other core", + "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l21_mod_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L2, other core", + "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l21_shr_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L2 with a Load-Hit-Store conflict", + "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l2_lhs_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L2 with no conflicts", + "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l2_no_conflict_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L2 with some conflict other than Load-Hit-Store", + "MetricExpr": 
"PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l2_other_conflict_percent" + }, + { + "BriefDescription": "% of DL1 reloads from L2", + "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l2_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3, other core", + "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l31_mod_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3, other core", + "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l31_shr_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L3 where the load collided with a pending prefetch", + "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l3_conflict_percent" + }, + { + "BriefDescription": "Percentage of L3 load hits per instruction where the line was brought into the L3 by a prefetch operation", + "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l3_mepf_rate_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L3 without conflicts", + "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l3_no_conflict_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from L3", + "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_l3_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Local L4", + "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_ll4_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Local Memory", + "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_lmem_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)", + "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_rl2l3_mod_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)", + "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_rl2l3_shr_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Remote L4", + "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_rl4_percent" + }, + { + "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory", + "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricGroup": "dl1_reloads_percent_per_ref", + "MetricName": "dl1_reload_from_rmem_percent" + }, + { + "BriefDescription": "dL1 miss portion of CPI", + "MetricExpr": "( 
(PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)/ (PM_RUN_CYC / PM_RUN_INST_CMPL)) * 100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "dcache_miss_cpi_percent" + }, + { + "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_DL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "dl2l3_mod_cpi_percent" + }, + { + "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_DL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "dl2l3_shr_cpi_percent" + }, + { + "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_DL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "dl4_cpi_percent" + }, + { + "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_DMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "dmem_cpi_percent" + }, + { + "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L21_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "l21_mod_cpi_percent" + }, + { + "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L21_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "l21_shr_cpi_percent" + }, + { + "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L2 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) ) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "l2_cpi_percent" + }, + { + "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L31_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "l31_mod_cpi_percent" + }, + { + "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L31_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": 
"estimated_dcache_miss_cpi", + "MetricName": "l31_shr_cpi_percent" + }, + { + "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_L3 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "l3_cpi_percent" + }, + { + "BriefDescription": "estimate of Local L4 miss rates with measured LL4 latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_LL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "ll4_cpi_percent" + }, + { + "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_LMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "lmem_cpi_percent" + }, + { + "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_RL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "rl2l3_mod_cpi_percent" + }, + { + "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_RL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "rl2l3_shr_cpi_percent" + }, + { + "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_RL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "rl4_cpi_percent" + }, + { + "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi", + "MetricExpr": "(((PM_DATA_FROM_RMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100", + "MetricGroup": "estimated_dcache_miss_cpi", + "MetricName": "rmem_cpi_percent" + }, + { + "BriefDescription": "Branch Mispredict flushes per instruction", + "MetricExpr": "PM_FLUSH_BR_MPRED / PM_RUN_INST_CMPL * 100", + "MetricGroup": "general", + "MetricName": "br_mpred_flush_rate_percent" + }, + { + "BriefDescription": "Cycles per instruction", + "MetricExpr": "PM_CYC / PM_INST_CMPL", + "MetricGroup": "general", + "MetricName": "cpi" + }, + { + "BriefDescription": "Percentage Cycles a group completed", + "MetricExpr": "PM_GRP_CMPL / PM_CYC * 100", + "MetricGroup": "general", + "MetricName": "cyc_grp_completed_percent" + }, + { + "BriefDescription": "Percentage Cycles a group dispatched", + "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100", + "MetricGroup": "general", + "MetricName": "cyc_grp_dispatched_percent" + }, + { + "BriefDescription": "Cycles per group", + "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL", + 
"MetricGroup": "general", + "MetricName": "cyc_per_group" + }, + { + "BriefDescription": "GCT empty cycles", + "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100", + "MetricGroup": "general", + "MetricName": "disp_flush_rate_percent" + }, + { + "BriefDescription": "% DTLB miss rate per inst", + "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100", + "MetricGroup": "general", + "MetricName": "dtlb_miss_rate_percent" + }, + { + "BriefDescription": "Flush rate (%)", + "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "flush_rate_percent" + }, + { + "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_11to14_slots_percent" + }, + { + "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_15to17_slots_percent" + }, + { + "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_18plus_slots_percent" + }, + { + "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_1to2_slots_percent" + }, + { + "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_3to6_slots_percent" + }, + { + "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had at least 1 slot valid", + "MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100", + "MetricGroup": "general", + "MetricName": "gct_util_7to10_slots_percent" + }, + { + "BriefDescription": "Avg. 
group size", + "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL", + "MetricGroup": "general", + "MetricName": "group_size" + }, + { + "BriefDescription": "Instructions per group", + "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL", + "MetricGroup": "general", + "MetricName": "inst_per_group" + }, + { + "BriefDescription": "Instructions per cycles", + "MetricExpr": "PM_INST_CMPL / PM_CYC", + "MetricGroup": "general", + "MetricName": "ipc" + }, + { + "BriefDescription": "% ITLB miss rate per inst", + "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100", + "MetricGroup": "general", + "MetricName": "itlb_miss_rate_percent" + }, + { + "BriefDescription": "Percentage of L1 load misses per L1 load ref", + "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100", + "MetricGroup": "general", + "MetricName": "l1_ld_miss_ratio_percent" + }, + { + "BriefDescription": "Percentage of L1 store misses per run instruction", + "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l1_st_miss_rate_percent" + }, + { + "BriefDescription": "Percentage of L1 store misses per L1 store ref", + "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100", + "MetricGroup": "general", + "MetricName": "l1_st_miss_ratio_percent" + }, + { + "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)", + "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l2_inst_miss_rate_percent" + }, + { + "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)", + "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l2_ld_miss_rate_percent" + }, + { + "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)", + "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l2_pteg_miss_rate_percent" + }, + { + "BriefDescription": "Percentage of L2 store misses per run instruction", + "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l2_st_miss_rate_percent" + }, + { + "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)", + "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l3_inst_miss_rate_percent" + }, + { + "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)", + "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l3_ld_miss_rate_percent" + }, + { + "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)", + "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "general", + "MetricName": "l3_pteg_miss_rate_percent" + }, + { + "BriefDescription": "Run cycles per cycle", + "MetricExpr": "PM_RUN_CYC / PM_CYC*100", + "MetricGroup": "general", + "MetricName": "run_cycles_percent" + }, + { + "BriefDescription": "Percentage of cycles spent in SMT2 Mode", + "MetricExpr": "(PM_RUN_CYC_SMT2_MODE/PM_RUN_CYC) * 100", + "MetricGroup": "general", + "MetricName": "smt2_cycles_percent" + }, + { + "BriefDescription": "Percentage of cycles spent in SMT4 Mode", + "MetricExpr": "(PM_RUN_CYC_SMT4_MODE/PM_RUN_CYC) * 100", + "MetricGroup": "general", + "MetricName": "smt4_cycles_percent" + }, + { + "BriefDescription": "Percentage of cycles spent in SMT8 Mode", + "MetricExpr": "(PM_RUN_CYC_SMT8_MODE/PM_RUN_CYC) * 100", + "MetricGroup": "general", + "MetricName": "smt8_cycles_percent" + }, + { + "BriefDescription": "IPC of all 
instructions completed by the core while this thread was stalled", + "MetricExpr": "PM_CMPLU_STALL_OTHER_CMPL/PM_RUN_CYC", + "MetricGroup": "general", + "MetricName": "smt_benefit" + }, + { + "BriefDescription": "Instruction dispatch-to-completion ratio", + "MetricExpr": "PM_INST_DISP / PM_INST_CMPL", + "MetricGroup": "general", + "MetricName": "speculation" + }, + { + "BriefDescription": "Percentage of cycles spent in Single Thread Mode", + "MetricExpr": "(PM_RUN_CYC_ST_MODE/PM_RUN_CYC) * 100", + "MetricGroup": "general", + "MetricName": "st_cycles_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst", + "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_dl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst", + "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_dl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L4 per Inst", + "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_dl4_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant Memory per Inst", + "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_dmem_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L2, other core per Inst", + "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l21_mod_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L2, other core per Inst", + "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l21_shr_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from L2 per Inst", + "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l2_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L3, other core per Inst", + "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l31_mod_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L3 other core per Inst", + "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l31_shr_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from L3 per Inst", + "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_l3_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Local L4 per Inst", + "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_ll4_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Local Memory per Inst", + "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": 
"inst_from_lmem_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst", + "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_rl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst", + "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_rl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L4 per Inst", + "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_rl4_rate_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote Memory per Inst", + "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "inst_from_rmem_rate_percent" + }, + { + "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)", + "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_misses_percent_per_inst", + "MetricName": "l1_inst_miss_rate_percent" + }, + { + "BriefDescription": "% Branches per instruction", + "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_mix", + "MetricName": "branches_per_inst" + }, + { + "BriefDescription": "Total Fixed point operations", + "MetricExpr": "(PM_FXU0_FIN + PM_FXU1_FIN)/PM_RUN_INST_CMPL", + "MetricGroup": "instruction_mix", + "MetricName": "fixed_per_inst" + }, + { + "BriefDescription": "FXU0 balance", + "MetricExpr": "PM_FXU0_FIN / (PM_FXU0_FIN + PM_FXU1_FIN)", + "MetricGroup": "instruction_mix", + "MetricName": "fxu0_balance" + }, + { + "BriefDescription": "Fraction of cycles that FXU0 is in use", + "MetricExpr": "PM_FXU0_FIN / PM_RUN_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu0_fin" + }, + { + "BriefDescription": "FXU0 only Busy", + "MetricExpr": "PM_FXU0_BUSY_FXU1_IDLE / PM_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu0_only_busy" + }, + { + "BriefDescription": "Fraction of cycles that FXU1 is in use", + "MetricExpr": "PM_FXU1_FIN / PM_RUN_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu1_fin" + }, + { + "BriefDescription": "FXU1 only Busy", + "MetricExpr": "PM_FXU1_BUSY_FXU0_IDLE / PM_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu1_only_busy" + }, + { + "BriefDescription": "Both FXU Busy", + "MetricExpr": "PM_FXU_BUSY / PM_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu_both_busy" + }, + { + "BriefDescription": "Both FXU Idle", + "MetricExpr": "PM_FXU_IDLE / PM_CYC", + "MetricGroup": "instruction_mix", + "MetricName": "fxu_both_idle" + }, + { + "BriefDescription": "PCT instruction loads", + "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_mix", + "MetricName": "loads_per_inst" + }, + { + "BriefDescription": "PCT instruction stores", + "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL", + "MetricGroup": "instruction_mix", + "MetricName": "stores_per_inst" + }, + { + "BriefDescription": "Icache Fetchs per Icache Miss", + "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "icache_miss_reload" + }, + { + "BriefDescription": "% of ICache reloads due to prefetch", + "MetricExpr": "PM_IC_PREF_WRITE * 
100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "icache_pref_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)", + "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_dl2l3_mod_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)", + "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_dl2l3_shr_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant L4", + "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_dl4_percent" + }, + { + "BriefDescription": "% of ICache reloads from Distant Memory", + "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_dmem_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L2, other core", + "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l21_mod_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L2, other core", + "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l21_shr_percent" + }, + { + "BriefDescription": "% of ICache reloads from L2", + "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l2_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L3, other core", + "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l31_mod_percent" + }, + { + "BriefDescription": "% of ICache reloads from Private L3, other core", + "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l31_shr_percent" + }, + { + "BriefDescription": "% of ICache reloads from L3", + "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_l3_percent" + }, + { + "BriefDescription": "% of ICache reloads from Local L4", + "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_ll4_percent" + }, + { + "BriefDescription": "% of ICache reloads from Local Memory", + "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_lmem_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)", + "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_rl2l3_mod_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)", + "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_rl2l3_shr_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote L4", + "MetricExpr": 
"PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_rl4_percent" + }, + { + "BriefDescription": "% of ICache reloads from Remote Memory", + "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS", + "MetricGroup": "instruction_stats_percent_per_ref", + "MetricName": "inst_from_rmem_percent" + }, + { + "BriefDescription": "Average number of stores that gather in the store buffer before being sent to an L2 RC machine", + "MetricExpr": "PM_ST_CMPL / (PM_L2_ST / 2)", + "MetricGroup": "l2_stats", + "MetricName": "avg_stores_gathered" + }, + { + "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)", + "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100", + "MetricGroup": "l2_stats", + "MetricName": "l2_st_miss_ratio_percent" + }, + { + "BriefDescription": "Percentage of L2 store misses per drained store. A drained store may contain multiple individual stores if they target the same line", + "MetricExpr": "PM_L2_ST_MISS / (PM_L2_ST / 2)", + "MetricGroup": "l2_stats", + "MetricName": "l2_store_miss_ratio_percent" + }, + { + "BriefDescription": "average L1 miss latency using marked events", + "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1", + "MetricGroup": "latency", + "MetricName": "average_dl1miss_latency" + }, + { + "BriefDescription": "Average icache miss latency", + "MetricExpr": "(PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ)", + "MetricGroup": "latency", + "MetricName": "average_il1_miss_latency" + }, + { + "BriefDescription": "average service time for SYNC", + "MetricExpr": "PM_LSU_SRQ_SYNC_CYC / PM_LSU_SRQ_SYNC", + "MetricGroup": "latency", + "MetricName": "average_sync_cyc" + }, + { + "BriefDescription": "Cycles LMQ slot0 was active on an average", + "MetricExpr": "PM_LSU_LMQ_S0_VALID / PM_LSU_LMQ_S0_ALLOC", + "MetricGroup": "latency", + "MetricName": "avg_lmq_life_time" + }, + { + "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS", + "MetricExpr": "PM_LSU_LRQ_S0_VALID / PM_LSU_LRQ_S0_ALLOC", + "MetricGroup": "latency", + "MetricName": "avg_lrq_life_time_even" + }, + { + "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 43 is valid ONLY FOR ODD THREADS", + "MetricExpr": "PM_LSU_LRQ_S43_VALID / PM_LSU_LRQ_S43_ALLOC", + "MetricGroup": "latency", + "MetricName": "avg_lrq_life_time_odd" + }, + { + "BriefDescription": "Average number of cycles SRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS", + "MetricExpr": "PM_LSU_SRQ_S0_VALID / PM_LSU_SRQ_S0_ALLOC", + "MetricGroup": "latency", + "MetricName": "avg_srq_life_time_even" + }, + { + "BriefDescription": "Average number of cycles SRQ stays active for one load. 
Slot 39 is valid ONLY FOR ODD THREADS", + "MetricExpr": "PM_LSU_SRQ_S39_VALID / PM_LSU_SRQ_S39_ALLOC", + "MetricGroup": "latency", + "MetricName": "avg_srq_life_time_odd" + }, + { + "BriefDescription": "Marked background kill latency, measured in L2", + "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL", + "MetricGroup": "latency", + "MetricName": "bkill_latency" + }, + { + "BriefDescription": "Marked dclaim latency, measured in L2", + "MetricExpr": "PM_MRK_FAB_RSP_DCLAIM_CYC / PM_MRK_FAB_RSP_DCLAIM", + "MetricGroup": "latency", + "MetricName": "dclaim_latency" + }, + { + "BriefDescription": "Marked L2L3 remote Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD", + "MetricGroup": "latency", + "MetricName": "dl2l3_mod_latency" + }, + { + "BriefDescription": "Marked L2L3 distant Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR", + "MetricGroup": "latency", + "MetricName": "dl2l3_shr_latency" + }, + { + "BriefDescription": "Distant L4 average load latency", + "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4", + "MetricGroup": "latency", + "MetricName": "dl4_latency" + }, + { + "BriefDescription": "Marked Dmem Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM", + "MetricGroup": "latency", + "MetricName": "dmem_latency" + }, + { + "BriefDescription": "estimated exposed miss latency for dL1 misses, ie load miss when we were NTC", + "MetricExpr": "PM_MRK_LD_MISS_EXPOSED_CYC / PM_MRK_LD_MISS_EXPOSED", + "MetricGroup": "latency", + "MetricName": "exposed_dl1miss_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the M state", + "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD", + "MetricGroup": "latency", + "MetricName": "l21_mod_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the S state", + "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR", + "MetricGroup": "latency", + "MetricName": "l21_shr_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time due to load-hit-store", + "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST", + "MetricGroup": "latency", + "MetricName": "l2_disp_conflict_ldhitst_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time NOT due load-hit-store", + "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER", + "MetricGroup": "latency", + "MetricName": "l2_disp_conflict_other_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L2", + "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2", + "MetricGroup": "latency", + "MetricName": "l2_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that were satisfied by lines prefetched into the L3. 
This information is forwarded from the L3", + "MetricExpr": "PM_MRK_DATA_FROM_L2_MEPF_CYC/ PM_MRK_DATA_FROM_L2", + "MetricGroup": "latency", + "MetricName": "l2_mepf_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered no conflicts", + "MetricExpr": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2", + "MetricGroup": "latency", + "MetricName": "l2_no_conflict_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and beyond", + "MetricExpr": "PM_MRK_DATA_FROM_L2MISS_CYC/ PM_MRK_DATA_FROM_L2MISS", + "MetricGroup": "latency", + "MetricName": "l2miss_latency" + }, + { + "BriefDescription": "Marked L31 Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD", + "MetricGroup": "latency", + "MetricName": "l31_mod_latency" + }, + { + "BriefDescription": "Marked L31 Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR", + "MetricGroup": "latency", + "MetricName": "l31_shr_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L3", + "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3", + "MetricGroup": "latency", + "MetricName": "l3_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and suffered no conflicts", + "MetricExpr": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2", + "MetricGroup": "latency", + "MetricName": "l3_no_conflict_latency" + }, + { + "BriefDescription": "Average load latency for all marked demand loads that come from beyond the L3", + "MetricExpr": "PM_MRK_DATA_FROM_L3MISS_CYC/ PM_MRK_DATA_FROM_L3MISS", + "MetricGroup": "latency", + "MetricName": "l3miss_latency" + }, + { + "BriefDescription": "Average latency for marked reloads that hit in the L3 on the MEPF state. i.e. 
lines that were prefetched into the L3", + "MetricExpr": "PM_MRK_DATA_FROM_L3_MEPF_CYC/ PM_MRK_DATA_FROM_L3_MEPF", + "MetricGroup": "latency", + "MetricName": "l3pref_latency" + }, + { + "BriefDescription": "Local L4 average load latency", + "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4", + "MetricGroup": "latency", + "MetricName": "ll4_latency" + }, + { + "BriefDescription": "Marked Lmem Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM", + "MetricGroup": "latency", + "MetricName": "lmem_latency" + }, + { + "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on a different chip", + "MetricExpr": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_OFF_CHIP_CACHE", + "MetricGroup": "latency", + "MetricName": "off_chip_cache_latency" + }, + { + "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on the same chip", + "MetricExpr": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_ON_CHIP_CACHE", + "MetricGroup": "latency", + "MetricName": "on_chip_cache_latency" + }, + { + "BriefDescription": "Marked L2L3 remote Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD", + "MetricGroup": "latency", + "MetricName": "rl2l3_mod_latency" + }, + { + "BriefDescription": "Marked L2L3 remote Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR", + "MetricGroup": "latency", + "MetricName": "rl2l3_shr_latency" + }, + { + "BriefDescription": "Remote L4 average load latency", + "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4", + "MetricGroup": "latency", + "MetricName": "rl4_latency" + }, + { + "BriefDescription": "Marked Rmem Load latency", + "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM", + "MetricGroup": "latency", + "MetricName": "rmem_latency" + }, + { + "BriefDescription": "ERAT miss reject ratio", + "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "lsu_rejects", + "MetricName": "erat_reject_rate_percent" + }, + { + "BriefDescription": "ERAT miss reject ratio", + "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / (PM_LSU_FIN - PM_LSU_FX_FIN)", + "MetricGroup": "lsu_rejects", + "MetricName": "erat_reject_ratio_percent" + }, + { + "BriefDescription": "LHS reject ratio", + "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL", + "MetricGroup": "lsu_rejects", + "MetricName": "lhs_reject_rate_percent" + }, + { + "BriefDescription": "LHS reject ratio", + "MetricExpr": "PM_LSU_REJECT_LHS *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)", + "MetricGroup": "lsu_rejects", + "MetricName": "lhs_reject_ratio_percent" + }, + { + "BriefDescription": "LMQ full reject ratio", + "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "lsu_rejects", + "MetricName": "lmq_full_reject_rate_percent" + }, + { + "BriefDescription": "ERAT miss reject ratio", + "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1", + "MetricGroup": "lsu_rejects", + "MetricName": "lmq_full_reject_ratio_percent" + }, + { + "BriefDescription": "LSU reject ratio", + "MetricExpr": "PM_LSU_REJECT *100/ PM_RUN_INST_CMPL", + "MetricGroup": "lsu_rejects", + "MetricName": "lsu_reject_rate_percent" + }, + { + "BriefDescription": "LSU reject ratio", + "MetricExpr": "PM_LSU_REJECT *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)", + "MetricGroup": "lsu_rejects", + "MetricName": "lsu_reject_ratio_percent" + }, + { + "BriefDescription": "Ratio of reloads from local L4 to distant L4", + 
"MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4", + "MetricGroup": "memory", + "MetricName": "ld_ll4_per_ld_dmem" + }, + { + "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4", + "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)", + "MetricGroup": "memory", + "MetricName": "ld_ll4_per_ld_mem" + }, + { + "BriefDescription": "Ratio of reloads from local L4 to remote L4", + "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4", + "MetricGroup": "memory", + "MetricName": "ld_ll4_per_ld_rl4" + }, + { + "BriefDescription": "Number of loads from local memory per loads from distant memory", + "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM", + "MetricGroup": "memory", + "MetricName": "ld_lmem_per_ld_dmem" + }, + { + "BriefDescription": "Number of loads from local memory per loads from remote and distant memory", + "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)", + "MetricGroup": "memory", + "MetricName": "ld_lmem_per_ld_mem" + }, + { + "BriefDescription": "Number of loads from local memory per loads from remote memory", + "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM", + "MetricGroup": "memory", + "MetricName": "ld_lmem_per_ld_rmem" + }, + { + "BriefDescription": "Number of loads from remote memory per loads from distant memory", + "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM", + "MetricGroup": "memory", + "MetricName": "ld_rmem_per_ld_dmem" + }, + { + "BriefDescription": "Memory locality", + "MetricExpr": "(PM_DATA_FROM_LL4 + PM_DATA_FROM_LMEM) * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4 + PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4 + PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4)", + "MetricGroup": "memory", + "MetricName": "mem_locality_percent" + }, + { + "BriefDescription": "DERAT Miss Rate (per run instruction)(%)", + "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "derat_miss_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst", + "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_dl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst", + "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_dl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L4 per inst", + "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_dl4_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant Memory per inst", + "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_dmem_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L2, other core per inst", + "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l21_mod_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L2, other core per inst", + "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l21_shr_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from L2 per 
inst", + "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l2_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L3, other core per inst", + "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l31_mod_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L3, other core per inst", + "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l31_shr_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from L3 per inst", + "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_l3_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Local L4 per inst", + "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_ll4_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Local Memory per inst", + "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_lmem_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst", + "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_rl2l3_mod_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst", + "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_rl2l3_shr_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L4 per inst", + "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_rl4_rate_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote Memory per inst", + "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "pteg_reloads_percent_per_inst", + "MetricName": "pteg_from_rmem_rate_percent" + }, + { + "BriefDescription": "% of DERAT misses that result in an ERAT reload", + "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "derat_miss_reload_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)", + "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_dl2l3_mod_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)", + "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_dl2l3_shr_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant L4", + "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_dl4_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Distant Memory", + "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_dmem_percent" + }, + { + 
"BriefDescription": "% of DERAT reloads from Private L2, other core", + "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l21_mod_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L2, other core", + "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l21_shr_percent" + }, + { + "BriefDescription": "% of DERAT reloads from L2", + "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l2_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L3, other core", + "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l31_mod_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Private L3, other core", + "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l31_shr_percent" + }, + { + "BriefDescription": "% of DERAT reloads from L3", + "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_l3_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Local L4", + "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_ll4_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Local Memory", + "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_lmem_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)", + "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_rl2l3_mod_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)", + "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_rl2l3_shr_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote L4", + "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_rl4_percent" + }, + { + "BriefDescription": "% of DERAT reloads from Remote Memory", + "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS", + "MetricGroup": "pteg_reloads_percent_per_ref", + "MetricName": "pteg_from_rmem_percent" + }, + { + "BriefDescription": "% DERAT miss ratio for 16G page per inst", + "MetricExpr": "100 * PM_DERAT_MISS_16G / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "derat_16g_miss_rate_percent" + }, + { + "BriefDescription": "DERAT miss ratio for 16G page", + "MetricExpr": "PM_DERAT_MISS_16G / PM_LSU_DERAT_MISS", + "MetricGroup": "translation", + "MetricName": "derat_16g_miss_ratio" + }, + { + "BriefDescription": "% DERAT miss rate for 16M page per inst", + "MetricExpr": "PM_DERAT_MISS_16M * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "derat_16m_miss_rate_percent" + }, + { + "BriefDescription": "DERAT miss ratio for 16M page", + "MetricExpr": "PM_DERAT_MISS_16M / PM_LSU_DERAT_MISS", + "MetricGroup": "translation", + "MetricName": "derat_16m_miss_ratio" + }, + { + "BriefDescription": "% DERAT 
miss rate for 4K page per inst", + "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "derat_4k_miss_rate_percent" + }, + { + "BriefDescription": "DERAT miss ratio for 4K page", + "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS", + "MetricGroup": "translation", + "MetricName": "derat_4k_miss_ratio" + }, + { + "BriefDescription": "% DERAT miss ratio for 64K page per inst", + "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "derat_64k_miss_rate_percent" + }, + { + "BriefDescription": "DERAT miss ratio for 64K page", + "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS", + "MetricGroup": "translation", + "MetricName": "derat_64k_miss_ratio" + }, + { + "BriefDescription": "% DSLB_Miss_Rate per inst", + "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "dslb_miss_rate_percent" + }, + { + "BriefDescription": "% ISLB miss rate per inst", + "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL", + "MetricGroup": "translation", + "MetricName": "islb_miss_rate_percent" + }, + { + "BriefDescription": "Fraction of hits on any Centaur (local, remote, or distant) on either L4 or DRAM per L1 load ref", + "MetricExpr": "PM_DATA_FROM_MEMORY / PM_LD_REF_L1", + "MetricName": "any_centaur_ld_hit_ratio" + }, + { + "BriefDescription": "Base Completion Cycles", + "MetricExpr": "PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL", + "MetricName": "base_completion_cpi" + }, + { + "BriefDescription": "Marked background kill latency, measured in L2", + "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL", + "MetricName": "bkill_ratio_percent" + }, + { + "BriefDescription": "cycles", + "MetricExpr": "PM_RUN_CYC", + "MetricName": "custom_secs" + }, + { + "BriefDescription": "Fraction of hits on a distant chip's Centaur (L4 or DRAM) per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4) / PM_LD_REF_L1", + "MetricName": "distant_centaur_ld_hit_ratio" + }, + { + "BriefDescription": "% of DL1 reloads that came from the L3 and beyond", + "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricName": "dl1_reload_from_l2_miss_percent" + }, + { + "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst", + "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL", + "MetricName": "dl1_reload_from_l31_rate_percent" + }, + { + "BriefDescription": "Percentage of DL1 reloads from L3 where the lines were brought into the L3 by a prefetch operation", + "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricName": "dl1_reload_from_l3_mepf_percent" + }, + { + "BriefDescription": "% of DL1 Reloads from beyond the local L3", + "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID", + "MetricName": "dl1_reload_from_l3_miss_percent" + }, + { + "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a distant chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_DL2L3_MOD / PM_LD_REF_L1", + "MetricName": "dl2l3_mod_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a distant chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_DL2L3_SHR / PM_LD_REF_L1", + "MetricName": "dl2l3_shr_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a distant Centaur's cache per L1 load ref", + "MetricExpr": "PM_DATA_FROM_DL4 / 
PM_LD_REF_L1", + "MetricName": "dl4_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a distant Centaur's DRAM per L1 load ref", + "MetricExpr": "PM_DATA_FROM_DMEM / PM_LD_REF_L1", + "MetricName": "dmem_ld_hit_ratio" + }, + { + "BriefDescription": "Rate of DERAT reloads from L2", + "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL", + "MetricName": "dpteg_from_l2_rate_percent" + }, + { + "BriefDescription": "Rate of DERAT reloads from L3", + "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL", + "MetricName": "dpteg_from_l3_rate_percent" + }, + { + "BriefDescription": "Overhead of expansion cycles", + "MetricExpr": "(PM_GRP_CMPL / PM_RUN_INST_CMPL) - (PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL)", + "MetricName": "expansion_overhead_cpi" + }, + { + "BriefDescription": "Total Fixed point operations executded in the Load/Store Unit following a load/store operation", + "MetricExpr": "PM_LSU_FX_FIN/PM_RUN_INST_CMPL", + "MetricName": "fixed_in_lsu_per_inst" + }, + { + "BriefDescription": "GCT empty cycles", + "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_CYC) * 100", + "MetricName": "gct_empty_percent" + }, + { + "BriefDescription": "Rate of IERAT reloads from L2", + "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL", + "MetricName": "ipteg_from_l2_rate_percent" + }, + { + "BriefDescription": "Rate of IERAT reloads from L3", + "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL", + "MetricName": "ipteg_from_l3_rate_percent" + }, + { + "BriefDescription": "Rate of IERAT reloads from local memory", + "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL", + "MetricName": "ipteg_from_ll4_rate_percent" + }, + { + "BriefDescription": "Rate of IERAT reloads from local memory", + "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL", + "MetricName": "ipteg_from_lmem_rate_percent" + }, + { + "BriefDescription": "Fraction of L1 hits per load ref", + "MetricExpr": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1", + "MetricName": "l1_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L1 load misses per L1 load ref", + "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1", + "MetricName": "l1_ld_miss_ratio" + }, + { + "BriefDescription": "Fraction of hits on another core's L2 on the same chip per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_L21_MOD + PM_DATA_FROM_L21_SHR) / PM_LD_REF_L1", + "MetricName": "l2_1_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L2 on the same chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L21_MOD / PM_LD_REF_L1", + "MetricName": "l2_1_mod_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the S state on another core's L2 on the same chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L21_SHR / PM_LD_REF_L1", + "MetricName": "l2_1_shr_ld_hit_ratio" + }, + { + "BriefDescription": "Average number of Castout machines used. 
1 of 16 CO machines is sampled every L2 cycle", + "MetricExpr": "(PM_CO_USAGE / PM_RUN_CYC) * 16", + "MetricName": "l2_co_usage" + }, + { + "BriefDescription": "Fraction of L2 load hits per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L2 / PM_LD_REF_L1", + "MetricName": "l2_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L2 load misses per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_REF_L1", + "MetricName": "l2_ld_miss_ratio" + }, + { + "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced a Load-Hit-Store conflict", + "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST / PM_LD_REF_L1", + "MetricName": "l2_lhs_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 did not experience a conflict", + "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT / PM_LD_REF_L1", + "MetricName": "l2_no_conflict_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced some conflict other than Load-Hit-Store", + "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER / PM_LD_REF_L1", + "MetricName": "l2_other_conflict_ld_hit_ratio" + }, + { + "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle", + "MetricExpr": "(PM_RC_USAGE / PM_RUN_CYC) * 16", + "MetricName": "l2_rc_usage" + }, + { + "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle", + "MetricExpr": "(PM_SN_USAGE / PM_RUN_CYC) * 8", + "MetricName": "l2_sn_usage" + }, + { + "BriefDescription": "Marked L31 Load latency", + "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)", + "MetricName": "l31_latency" + }, + { + "BriefDescription": "Fraction of hits on another core's L3 on the same chip per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) / PM_LD_REF_L1", + "MetricName": "l3_1_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L3 on the same chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L31_MOD / PM_LD_REF_L1", + "MetricName": "l3_1_mod_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the S state on another core's L3 on the same chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L31_SHR / PM_LD_REF_L1", + "MetricName": "l3_1_shr_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L3 load hits per load ref where the demand load collided with a pending prefetch", + "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT / PM_LD_REF_L1", + "MetricName": "l3_conflict_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L3 load hits per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L3 / PM_LD_REF_L1", + "MetricName": "l3_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L3 load misses per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_REF_L1", + "MetricName": "l3_ld_miss_ratio" + }, + { + "BriefDescription": "Fraction of L3 load hits per load ref where the L3 did not experience a conflict", + "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT / PM_LD_REF_L1", + "MetricName": "l3_no_conflict_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L3 hits on lines that were not in the MEPF state per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_L3 - PM_DATA_FROM_L3_MEPF) / PM_LD_REF_L1", + "MetricName": "l3other_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of L3 hits on lines 
that were recently prefetched into the L3 (MEPF state) per L1 load ref", + "MetricExpr": "PM_DATA_FROM_L3_MEPF / PM_LD_REF_L1", + "MetricName": "l3pref_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a local Centaur's cache per L1 load ref", + "MetricExpr": "PM_DATA_FROM_LL4 / PM_LD_REF_L1", + "MetricName": "ll4_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a local Centaur's DRAM per L1 load ref", + "MetricExpr": "PM_DATA_FROM_LMEM / PM_LD_REF_L1", + "MetricName": "lmem_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a local Centaur (L4 or DRAM) per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4) / PM_LD_REF_L1", + "MetricName": "local_centaur_ld_hit_ratio" + }, + { + "BriefDescription": "Cycles stalled by Other LSU Operations", + "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_REJECT - PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_STORE) / (PM_LD_REF_L1 - PM_LD_MISS_L1)", + "MetricName": "lsu_stall_avg_cyc_per_l1hit_stfw" + }, + { + "BriefDescription": "Fraction of hits on another core's L2 or L3 on a different chip (remote or distant) per L1 load ref", + "MetricExpr": "PM_DATA_FROM_OFF_CHIP_CACHE / PM_LD_REF_L1", + "MetricName": "off_chip_cache_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on another core's L2 or L3 on the same chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_ON_CHIP_CACHE / PM_LD_REF_L1", + "MetricName": "on_chip_cache_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a remote chip's Centaur (L4 or DRAM) per L1 load ref", + "MetricExpr": "(PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4) / PM_LD_REF_L1", + "MetricName": "remote_centaur_ld_hit_ratio" + }, + { + "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable resources or facilities", + "MetricExpr": "PM_ISU_REJECT_RES_NA *100/ PM_RUN_INST_CMPL", + "MetricName": "resource_na_reject_rate_percent" + }, + { + "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a remote chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_RL2L3_MOD / PM_LD_REF_L1", + "MetricName": "rl2l3_mod_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a remote chip per L1 load ref", + "MetricExpr": "PM_DATA_FROM_RL2L3_SHR / PM_LD_REF_L1", + "MetricName": "rl2l3_shr_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a remote Centaur's cache per L1 load ref", + "MetricExpr": "PM_DATA_FROM_RL4 / PM_LD_REF_L1", + "MetricName": "rl4_ld_hit_ratio" + }, + { + "BriefDescription": "Fraction of hits on a remote Centaur's DRAM per L1 load ref", + "MetricExpr": "PM_DATA_FROM_RMEM / PM_LD_REF_L1", + "MetricName": "rmem_ld_hit_ratio" + }, + { + "BriefDescription": "Percent of all FXU/VSU instructions that got rejected due to SAR Bypass", + "MetricExpr": "PM_ISU_REJECT_SAR_BYPASS *100/ PM_RUN_INST_CMPL", + "MetricName": "sar_bypass_reject_rate_percent" + }, + { + "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable sources", + "MetricExpr": "PM_ISU_REJECT_SRC_NA *100/ PM_RUN_INST_CMPL", + "MetricName": "source_na_reject_rate_percent" + }, + { + "BriefDescription": "Store forward rate", + "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / PM_RUN_INST_CMPL", + "MetricName": "store_forward_rate_percent" + }, + { + "BriefDescription": "Store forward rate", + "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / 
(PM_LD_REF_L1 - PM_LD_MISS_L1)", + "MetricName": "store_forward_ratio_percent" + }, + { + "BriefDescription": "Marked store latency, from core completion to L2 RC machine completion", + "MetricExpr": "(PM_MRK_ST_L2DISP_TO_CMPL_CYC + PM_MRK_ST_DRAIN_TO_L2DISP_CYC) / PM_MRK_ST_NEST", + "MetricName": "store_latency" + }, + { + "BriefDescription": "Cycles stalled by any sync", + "MetricExpr": "(PM_CMPLU_STALL_LWSYNC + PM_CMPLU_STALL_HWSYNC) / PM_RUN_INST_CMPL", + "MetricName": "sync_stall_cpi" + }, + { + "BriefDescription": "Percentage of lines that were prefetched into the L3 and evicted before they were consumed", + "MetricExpr": "(PM_L3_CO_MEPF / 2) / PM_L3_PREF_ALL * 100", + "MetricName": "wasted_l3_prefetch_percent" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json new file mode 100644 index 0000000000..f1f2965f67 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json @@ -0,0 +1,3446 @@ +[ + { + "EventCode": "0x1f05e", + "EventName": "PM_1LPAR_CYC", + "BriefDescription": "Number of cycles in single lpar mode. All threads in the core are assigned to the same lpar", + "PublicDescription": "" + }, + { + "EventCode": "0x2006e", + "EventName": "PM_2LPAR_CYC", + "BriefDescription": "Cycles in 2-lpar mode. Threads 0-3 belong to Lpar0 and threads 4-7 belong to Lpar1", + "PublicDescription": "Number of cycles in 2 lpar mode" + }, + { + "EventCode": "0x4e05e", + "EventName": "PM_4LPAR_CYC", + "BriefDescription": "Number of cycles in 4 LPAR mode. Threads 0-1 belong to lpar0, threads 2-3 belong to lpar1, threads 4-5 belong to lpar2, and threads 6-7 belong to lpar3", + "PublicDescription": "" + }, + { + "EventCode": "0x610050", + "EventName": "PM_ALL_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)" + }, + { + "EventCode": "0x520050", + "EventName": "PM_ALL_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x620052", + "EventName": "PM_ALL_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x610052", + "EventName": "PM_ALL_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump 
was chip pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x610054", + "EventName": "PM_ALL_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Pump prediction correct. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x640052", + "EventName": "PM_ALL_PUMP_MPRED", + "BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Pump misprediction. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x630050", + "EventName": "PM_ALL_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x630052", + "EventName": "PM_ALL_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x640050", + "EventName": "PM_ALL_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)" + }, + { + "EventCode": "0x4082", + "EventName": "PM_BANK_CONFLICT", + "BriefDescription": "Read blocked due to interleave conflict. 
The ifar logic will detect an interleave conflict and kill the data that was read that cycle", + "PublicDescription": "" + }, + { + "EventCode": "0x5086", + "EventName": "PM_BR_BC_8", + "BriefDescription": "Pairable BC+8 branch that has not been converted to a Resolve Finished in the BRU pipeline", + "PublicDescription": "" + }, + { + "EventCode": "0x5084", + "EventName": "PM_BR_BC_8_CONV", + "BriefDescription": "Pairable BC+8 branch that was converted to a Resolve Finished in the BRU pipeline", + "PublicDescription": "" + }, + { + "EventCode": "0x40ac", + "EventName": "PM_BR_MPRED_CCACHE", + "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Count Cache Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x40b8", + "EventName": "PM_BR_MPRED_CR", + "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the BHT Direction Prediction (taken/not taken)", + "PublicDescription": "" + }, + { + "EventCode": "0x40ae", + "EventName": "PM_BR_MPRED_LSTACK", + "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Link Stack Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x40ba", + "EventName": "PM_BR_MPRED_TA", + "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event", + "PublicDescription": "" + }, + { + "EventCode": "0x10138", + "EventName": "PM_BR_MRK_2PATH", + "BriefDescription": "marked two path branch", + "PublicDescription": "" + }, + { + "EventCode": "0x409c", + "EventName": "PM_BR_PRED_BR0", + "BriefDescription": "Conditional Branch Completed on BR0 (1st branch in group) in which the HW predicted the Direction or Target", + "PublicDescription": "" + }, + { + "EventCode": "0x409e", + "EventName": "PM_BR_PRED_BR1", + "BriefDescription": "Conditional Branch Completed on BR1 (2nd branch in group) in which the HW predicted the Direction or Target. Note: BR1 can only be used in Single Thread Mode. In all of the SMT modes, only one branch can complete, thus BR1 is unused", + "PublicDescription": "" + }, + { + "EventCode": "0x489c", + "EventName": "PM_BR_PRED_BR_CMPL", + "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) OR if_pc_br0_br_pred(1)", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x40a4", + "EventName": "PM_BR_PRED_CCACHE_BR0", + "BriefDescription": "Conditional Branch Completed on BR0 that used the Count Cache for Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x40a6", + "EventName": "PM_BR_PRED_CCACHE_BR1", + "BriefDescription": "Conditional Branch Completed on BR1 that used the Count Cache for Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x48a4", + "EventName": "PM_BR_PRED_CCACHE_CMPL", + "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND if_pc_br0_pred_type", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x40b0", + "EventName": "PM_BR_PRED_CR_BR0", + "BriefDescription": "Conditional Branch Completed on BR0 that had its direction predicted. I-form branches do not set this event. 
In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches", + "PublicDescription": "" + }, + { + "EventCode": "0x40b2", + "EventName": "PM_BR_PRED_CR_BR1", + "BriefDescription": "Conditional Branch Completed on BR1 that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches", + "PublicDescription": "" + }, + { + "EventCode": "0x48b0", + "EventName": "PM_BR_PRED_CR_CMPL", + "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(1)='1'", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x40a8", + "EventName": "PM_BR_PRED_LSTACK_BR0", + "BriefDescription": "Conditional Branch Completed on BR0 that used the Link Stack for Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x40aa", + "EventName": "PM_BR_PRED_LSTACK_BR1", + "BriefDescription": "Conditional Branch Completed on BR1 that used the Link Stack for Target Prediction", + "PublicDescription": "" + }, + { + "EventCode": "0x48a8", + "EventName": "PM_BR_PRED_LSTACK_CMPL", + "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND (not if_pc_br0_pred_type)", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x40b4", + "EventName": "PM_BR_PRED_TA_BR0", + "BriefDescription": "Conditional Branch Completed on BR0 that had its target address predicted. Only XL-form branches set this event", + "PublicDescription": "" + }, + { + "EventCode": "0x40b6", + "EventName": "PM_BR_PRED_TA_BR1", + "BriefDescription": "Conditional Branch Completed on BR1 that had its target address predicted. Only XL-form branches set this event", + "PublicDescription": "" + }, + { + "EventCode": "0x48b4", + "EventName": "PM_BR_PRED_TA_CMPL", + "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0)='1'", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x40a0", + "EventName": "PM_BR_UNCOND_BR0", + "BriefDescription": "Unconditional Branch Completed on BR0. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve", + "PublicDescription": "" + }, + { + "EventCode": "0x40a2", + "EventName": "PM_BR_UNCOND_BR1", + "BriefDescription": "Unconditional Branch Completed on BR1. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve", + "PublicDescription": "" + }, + { + "EventCode": "0x48a0", + "EventName": "PM_BR_UNCOND_CMPL", + "BriefDescription": "Completion Time Event. 
This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred=00 AND if_pc_br0_completed", + "PublicDescription": "IFU" + }, + { + "EventCode": "0x3094", + "EventName": "PM_CASTOUT_ISSUED", + "BriefDescription": "Castouts issued", + "PublicDescription": "" + }, + { + "EventCode": "0x3096", + "EventName": "PM_CASTOUT_ISSUED_GPR", + "BriefDescription": "Castouts issued GPR", + "PublicDescription": "" + }, + { + "EventCode": "0x2090", + "EventName": "PM_CLB_HELD", + "BriefDescription": "CLB Hold: Any Reason", + "PublicDescription": "" + }, + { + "EventCode": "0x2d018", + "EventName": "PM_CMPLU_STALL_BRU_CRU", + "BriefDescription": "Completion stall due to IFU", + "PublicDescription": "" + }, + { + "EventCode": "0x30026", + "EventName": "PM_CMPLU_STALL_COQ_FULL", + "BriefDescription": "Completion stall due to CO q full", + "PublicDescription": "" + }, + { + "EventCode": "0x30038", + "EventName": "PM_CMPLU_STALL_FLUSH", + "BriefDescription": "completion stall due to flush by own thread", + "PublicDescription": "" + }, + { + "EventCode": "0x30028", + "EventName": "PM_CMPLU_STALL_MEM_ECC_DELAY", + "BriefDescription": "Completion stall due to mem ECC delay", + "PublicDescription": "" + }, + { + "EventCode": "0x2e01c", + "EventName": "PM_CMPLU_STALL_NO_NTF", + "BriefDescription": "Completion stall due to nop", + "PublicDescription": "" + }, + { + "EventCode": "0x2e01e", + "EventName": "PM_CMPLU_STALL_NTCG_FLUSH", + "BriefDescription": "Completion stall due to ntcg flush", + "PublicDescription": "Completion stall due to reject (load hit store)" + }, + { + "EventCode": "0x4c010", + "EventName": "PM_CMPLU_STALL_REJECT", + "BriefDescription": "Completion stall due to LSU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x2c01a", + "EventName": "PM_CMPLU_STALL_REJECT_LHS", + "BriefDescription": "Completion stall due to reject (load hit store)", + "PublicDescription": "" + }, + { + "EventCode": "0x4c014", + "EventName": "PM_CMPLU_STALL_REJ_LMQ_FULL", + "BriefDescription": "Completion stall due to LSU reject LMQ full", + "PublicDescription": "" + }, + { + "EventCode": "0x4d010", + "EventName": "PM_CMPLU_STALL_SCALAR", + "BriefDescription": "Completion stall due to VSU scalar instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x2d010", + "EventName": "PM_CMPLU_STALL_SCALAR_LONG", + "BriefDescription": "Completion stall due to VSU scalar long latency instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x2c014", + "EventName": "PM_CMPLU_STALL_STORE", + "BriefDescription": "Completion stall by stores this includes store agen finishes in pipe LS0/LS1 and store data finishes in LS2/LS3", + "PublicDescription": "" + }, + { + "EventCode": "0x2d014", + "EventName": "PM_CMPLU_STALL_VECTOR", + "BriefDescription": "Completion stall due to VSU vector instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x4d012", + "EventName": "PM_CMPLU_STALL_VECTOR_LONG", + "BriefDescription": "Completion stall due to VSU vector long instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x2d012", + "EventName": "PM_CMPLU_STALL_VSU", + "BriefDescription": "Completion stall due to VSU instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x16083", + "EventName": "PM_CO0_ALLOC", + "BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x16082", + "EventName": "PM_CO0_BUSY", + "BriefDescription": "CO mach 0 Busy. 
Used by PMU to sample ave RC livetime(mach0 used as sample point)", + "PublicDescription": "" + }, + { + "EventCode": "0x3608a", + "EventName": "PM_CO_USAGE", + "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running", + "PublicDescription": "" + }, + { + "EventCode": "0x40066", + "EventName": "PM_CRU_FIN", + "BriefDescription": "IFU Finished a (non-branch) instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x61c050", + "EventName": "PM_DATA_ALL_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for either demand loads or data prefetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load" + }, + { + "EventCode": "0x64c048", + "EventName": "PM_DATA_ALL_FROM_DL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c048", + "EventName": "PM_DATA_ALL_FROM_DL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c04c", + "EventName": "PM_DATA_ALL_FROM_DL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c04c", + "EventName": "PM_DATA_ALL_FROM_DMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c042", + "EventName": "PM_DATA_ALL_FROM_L2", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c046", + "EventName": "PM_DATA_ALL_FROM_L21_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The 
processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c046", + "EventName": "PM_DATA_ALL_FROM_L21_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c04e", + "EventName": "PM_DATA_ALL_FROM_L2MISS_MOD", + "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c040", + "EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c040", + "EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c040", + "EventName": "PM_DATA_ALL_FROM_L2_MEPF", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c040", + "EventName": "PM_DATA_ALL_FROM_L2_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c042", + "EventName": "PM_DATA_ALL_FROM_L3", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c044", + "EventName": "PM_DATA_ALL_FROM_L31_ECO_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Modified 
(M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c044", + "EventName": "PM_DATA_ALL_FROM_L31_ECO_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c044", + "EventName": "PM_DATA_ALL_FROM_L31_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c046", + "EventName": "PM_DATA_ALL_FROM_L31_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c04e", + "EventName": "PM_DATA_ALL_FROM_L3MISS_MOD", + "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c042", + "EventName": "PM_DATA_ALL_FROM_L3_DISP_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c042", + "EventName": "PM_DATA_ALL_FROM_L3_MEPF", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c044", + "EventName": "PM_DATA_ALL_FROM_L3_NO_CONFLICT", + "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c04c", + "EventName": "PM_DATA_ALL_FROM_LL4", + "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded 
from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c048", + "EventName": "PM_DATA_ALL_FROM_LMEM", + "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c04c", + "EventName": "PM_DATA_ALL_FROM_MEMORY", + "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x64c04a", + "EventName": "PM_DATA_ALL_FROM_OFF_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c048", + "EventName": "PM_DATA_ALL_FROM_ON_CHIP_CACHE", + "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c046", + "EventName": "PM_DATA_ALL_FROM_RL2L3_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x61c04a", + "EventName": "PM_DATA_ALL_FROM_RL2L3_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c04a", + "EventName": "PM_DATA_ALL_FROM_RL4", + "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x63c04a", + 
"EventName": "PM_DATA_ALL_FROM_RMEM", + "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either demand loads or data prefetch", + "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x62c050", + "EventName": "PM_DATA_ALL_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for either demand loads or data prefetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load" + }, + { + "EventCode": "0x62c052", + "EventName": "PM_DATA_ALL_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for either demand loads or data prefetch", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x61c052", + "EventName": "PM_DATA_ALL_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for either demand loads or data prefetch", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load" + }, + { + "EventCode": "0x61c054", + "EventName": "PM_DATA_ALL_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for either demand loads or data prefetch", + "PublicDescription": "Pump prediction correct. Counts across all types of pumps for a demand load" + }, + { + "EventCode": "0x64c052", + "EventName": "PM_DATA_ALL_PUMP_MPRED", + "BriefDescription": "Pump misprediction. Counts across all types of pumps for either demand loads or data prefetch", + "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load" + }, + { + "EventCode": "0x63c050", + "EventName": "PM_DATA_ALL_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for either demand loads or data prefetch", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load" + }, + { + "EventCode": "0x63c052", + "EventName": "PM_DATA_ALL_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. 
Counts for either demand loads or data prefetch", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x64c050", + "EventName": "PM_DATA_ALL_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for either demand loads or data prefetch", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load" + }, + { + "EventCode": "0x4c046", + "EventName": "PM_DATA_FROM_L21_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c046", + "EventName": "PM_DATA_FROM_L21_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x4c044", + "EventName": "PM_DATA_FROM_L31_ECO_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x3c044", + "EventName": "PM_DATA_FROM_L31_ECO_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x2c044", + "EventName": "PM_DATA_FROM_L31_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x1c046", + "EventName": "PM_DATA_FROM_L31_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a demand load", + "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1" + }, + { + "EventCode": "0x400fe", + "EventName": "PM_DATA_FROM_MEM", + "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load", + "PublicDescription": 
"Data cache reload from memory (including L4)" + }, + { + "EventCode": "0xe0bc", + "EventName": "PM_DC_COLLISIONS", + "BriefDescription": "DATA Cache collisions", + "PublicDescription": "DATA Cache collisions42" + }, + { + "EventCode": "0x1e050", + "EventName": "PM_DC_PREF_STREAM_ALLOC", + "BriefDescription": "Stream marked valid. The stream could have been allocated through the hardware prefetch mechanism or through software. This is combined ls0 and ls1", + "PublicDescription": "" + }, + { + "EventCode": "0x2e050", + "EventName": "PM_DC_PREF_STREAM_CONF", + "BriefDescription": "A demand load referenced a line in an active prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software. Combine up + down", + "PublicDescription": "" + }, + { + "EventCode": "0x4e050", + "EventName": "PM_DC_PREF_STREAM_FUZZY_CONF", + "BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)", + "PublicDescription": "" + }, + { + "EventCode": "0x3e050", + "EventName": "PM_DC_PREF_STREAM_STRIDED_CONF", + "BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software", + "PublicDescription": "" + }, + { + "EventCode": "0xb0ba", + "EventName": "PM_DFU", + "BriefDescription": "Finish DFU (all finish)", + "PublicDescription": "" + }, + { + "EventCode": "0xb0be", + "EventName": "PM_DFU_DCFFIX", + "BriefDescription": "Convert from fixed opcode finish (dcffix,dcffixq)", + "PublicDescription": "" + }, + { + "EventCode": "0xb0bc", + "EventName": "PM_DFU_DENBCD", + "BriefDescription": "BCD->DPD opcode finish (denbcd, denbcdq)", + "PublicDescription": "" + }, + { + "EventCode": "0xb0b8", + "EventName": "PM_DFU_MC", + "BriefDescription": "Finish DFU multicycle", + "PublicDescription": "" + }, + { + "EventCode": "0x2092", + "EventName": "PM_DISP_CLB_HELD_BAL", + "BriefDescription": "Dispatch/CLB Hold: Balance", + "PublicDescription": "" + }, + { + "EventCode": "0x2094", + "EventName": "PM_DISP_CLB_HELD_RES", + "BriefDescription": "Dispatch/CLB Hold: Resource", + "PublicDescription": "" + }, + { + "EventCode": "0x20a8", + "EventName": "PM_DISP_CLB_HELD_SB", + "BriefDescription": "Dispatch/CLB Hold: Scoreboard", + "PublicDescription": "" + }, + { + "EventCode": "0x2098", + "EventName": "PM_DISP_CLB_HELD_SYNC", + "BriefDescription": "Dispatch/CLB Hold: Sync type instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x2096", + "EventName": "PM_DISP_CLB_HELD_TLBIE", + "BriefDescription": "Dispatch Hold: Due to TLBIE", + "PublicDescription": "" + }, + { + "EventCode": "0x20006", + "EventName": "PM_DISP_HELD_IQ_FULL", + "BriefDescription": "Dispatch held due to Issue q full", + "PublicDescription": "" + }, + { + "EventCode": "0x1002a", + "EventName": "PM_DISP_HELD_MAP_FULL", + "BriefDescription": "Dispatch for this thread was held because the Mappers were full", + "PublicDescription": "Dispatch held due to Mapper full" + }, + { + "EventCode": "0x30018", + "EventName": "PM_DISP_HELD_SRQ_FULL", + "BriefDescription": "Dispatch held due SRQ no room", + "PublicDescription": "" + }, + { + "EventCode": "0x30a6", + "EventName": "PM_DISP_HOLD_GCT_FULL", + "BriefDescription": "Dispatch Hold Due to no space in the GCT", + "PublicDescription": "" + }, + { + "EventCode": 
"0x30008", + "EventName": "PM_DISP_WT", + "BriefDescription": "Dispatched Starved", + "PublicDescription": "Dispatched Starved (not held, nothing to dispatch)" + }, + { + "EventCode": "0x4e046", + "EventName": "PM_DPTEG_FROM_L21_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e046", + "EventName": "PM_DPTEG_FROM_L21_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e040", + "EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4e040", + "EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4e044", + "EventName": "PM_DPTEG_FROM_L31_ECO_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e044", + "EventName": "PM_DPTEG_FROM_L31_ECO_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e044", + "EventName": "PM_DPTEG_FROM_L31_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e046", + "EventName": "PM_DPTEG_FROM_L31_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x50a8", + "EventName": "PM_EAT_FORCE_MISPRED", + "BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. 
This is a rare case that may occur when the EAT is full and a branch is issue", + "PublicDescription": "" + }, + { + "EventCode": "0x4084", + "EventName": "PM_EAT_FULL_CYC", + "BriefDescription": "Cycles No room in EAT", + "PublicDescription": "Cycles No room in EATSet on bank conflict and case where no ibuffers available" + }, + { + "EventCode": "0x2080", + "EventName": "PM_EE_OFF_EXT_INT", + "BriefDescription": "Ee off and external interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x20b4", + "EventName": "PM_FAV_TBEGIN", + "BriefDescription": "Dispatch time Favored tbegin", + "PublicDescription": "" + }, + { + "EventCode": "0x100f4", + "EventName": "PM_FLOP", + "BriefDescription": "Floating Point Operation Finished", + "PublicDescription": "Floating Point Operations Finished" + }, + { + "EventCode": "0xa0ae", + "EventName": "PM_FLOP_SUM_SCALAR", + "BriefDescription": "flops summary scalar instructions", + "PublicDescription": "" + }, + { + "EventCode": "0xa0ac", + "EventName": "PM_FLOP_SUM_VEC", + "BriefDescription": "flops summary vector instructions", + "PublicDescription": "" + }, + { + "EventCode": "0x2084", + "EventName": "PM_FLUSH_BR_MPRED", + "BriefDescription": "Flush caused by branch mispredict", + "PublicDescription": "" + }, + { + "EventCode": "0x2082", + "EventName": "PM_FLUSH_DISP", + "BriefDescription": "Dispatch flush", + "PublicDescription": "" + }, + { + "EventCode": "0x208c", + "EventName": "PM_FLUSH_DISP_SB", + "BriefDescription": "Dispatch Flush: Scoreboard", + "PublicDescription": "" + }, + { + "EventCode": "0x2088", + "EventName": "PM_FLUSH_DISP_SYNC", + "BriefDescription": "Dispatch Flush: Sync", + "PublicDescription": "" + }, + { + "EventCode": "0x208a", + "EventName": "PM_FLUSH_DISP_TLBIE", + "BriefDescription": "Dispatch Flush: TLBIE", + "PublicDescription": "" + }, + { + "EventCode": "0x208e", + "EventName": "PM_FLUSH_LSU", + "BriefDescription": "Flush initiated by LSU", + "PublicDescription": "" + }, + { + "EventCode": "0x2086", + "EventName": "PM_FLUSH_PARTIAL", + "BriefDescription": "Partial flush", + "PublicDescription": "" + }, + { + "EventCode": "0xa0b0", + "EventName": "PM_FPU0_FCONV", + "BriefDescription": "Convert instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0xa0b8", + "EventName": "PM_FPU0_FEST", + "BriefDescription": "Estimate instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0xa0b4", + "EventName": "PM_FPU0_FRSP", + "BriefDescription": "Round to single precision instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0xa0b2", + "EventName": "PM_FPU1_FCONV", + "BriefDescription": "Convert instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0xa0ba", + "EventName": "PM_FPU1_FEST", + "BriefDescription": "Estimate instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0xa0b6", + "EventName": "PM_FPU1_FRSP", + "BriefDescription": "Round to single precision instruction executed", + "PublicDescription": "" + }, + { + "EventCode": "0x50b0", + "EventName": "PM_FUSION_TOC_GRP0_1", + "BriefDescription": "One pair of instructions fused with TOC in Group0", + "PublicDescription": "" + }, + { + "EventCode": "0x50ae", + "EventName": "PM_FUSION_TOC_GRP0_2", + "BriefDescription": "Two pairs of instructions fused with TOCin Group0", + "PublicDescription": "" + }, + { + "EventCode": "0x50ac", + "EventName": "PM_FUSION_TOC_GRP0_3", + "BriefDescription": "Three pairs of instructions fused with TOC in Group0", + "PublicDescription": "" + }, + { + 
"EventCode": "0x50b2", + "EventName": "PM_FUSION_TOC_GRP1_1", + "BriefDescription": "One pair of instructions fused with TOX in Group1", + "PublicDescription": "" + }, + { + "EventCode": "0x50b8", + "EventName": "PM_FUSION_VSX_GRP0_1", + "BriefDescription": "One pair of instructions fused with VSX in Group0", + "PublicDescription": "" + }, + { + "EventCode": "0x50b6", + "EventName": "PM_FUSION_VSX_GRP0_2", + "BriefDescription": "Two pairs of instructions fused with VSX in Group0", + "PublicDescription": "" + }, + { + "EventCode": "0x50b4", + "EventName": "PM_FUSION_VSX_GRP0_3", + "BriefDescription": "Three pairs of instructions fused with VSX in Group0", + "PublicDescription": "" + }, + { + "EventCode": "0x50ba", + "EventName": "PM_FUSION_VSX_GRP1_1", + "BriefDescription": "One pair of instructions fused with VSX in Group1", + "PublicDescription": "" + }, + { + "EventCode": "0x3000e", + "EventName": "PM_FXU0_BUSY_FXU1_IDLE", + "BriefDescription": "fxu0 busy and fxu1 idle", + "PublicDescription": "" + }, + { + "EventCode": "0x10004", + "EventName": "PM_FXU0_FIN", + "BriefDescription": "The fixed point unit Unit 0 finished an instruction. Instructions that finish may not necessary complete", + "PublicDescription": "FXU0 Finished" + }, + { + "EventCode": "0x4000e", + "EventName": "PM_FXU1_BUSY_FXU0_IDLE", + "BriefDescription": "fxu0 idle and fxu1 busy", + "PublicDescription": "" + }, + { + "EventCode": "0x40004", + "EventName": "PM_FXU1_FIN", + "BriefDescription": "FXU1 Finished", + "PublicDescription": "" + }, + { + "EventCode": "0x20008", + "EventName": "PM_GCT_EMPTY_CYC", + "BriefDescription": "No itags assigned either thread (GCT Empty)", + "PublicDescription": "" + }, + { + "EventCode": "0x30a4", + "EventName": "PM_GCT_MERGE", + "BriefDescription": "Group dispatched on a merged GCT empty. 
GCT entries can be merged only within the same thread", + "PublicDescription": "" + }, + { + "EventCode": "0x4d01e", + "EventName": "PM_GCT_NOSLOT_BR_MPRED", + "BriefDescription": "Gct empty for this thread due to branch mispred", + "PublicDescription": "" + }, + { + "EventCode": "0x4d01a", + "EventName": "PM_GCT_NOSLOT_BR_MPRED_ICMISS", + "BriefDescription": "Gct empty for this thread due to Icache Miss and branch mispred", + "PublicDescription": "" + }, + { + "EventCode": "0x100f8", + "EventName": "PM_GCT_NOSLOT_CYC", + "BriefDescription": "No itags assigned", + "PublicDescription": "Pipeline empty (No itags assigned , no GCT slots used)" + }, + { + "EventCode": "0x2d01e", + "EventName": "PM_GCT_NOSLOT_DISP_HELD_ISSQ", + "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Issue q full", + "PublicDescription": "" + }, + { + "EventCode": "0x4d01c", + "EventName": "PM_GCT_NOSLOT_DISP_HELD_MAP", + "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Mapper full", + "PublicDescription": "" + }, + { + "EventCode": "0x2e010", + "EventName": "PM_GCT_NOSLOT_DISP_HELD_OTHER", + "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to sync", + "PublicDescription": "" + }, + { + "EventCode": "0x2d01c", + "EventName": "PM_GCT_NOSLOT_DISP_HELD_SRQ", + "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to SRQ full", + "PublicDescription": "" + }, + { + "EventCode": "0x4e010", + "EventName": "PM_GCT_NOSLOT_IC_L3MISS", + "BriefDescription": "Gct empty for this thread due to icache l3 miss", + "PublicDescription": "" + }, + { + "EventCode": "0x2d01a", + "EventName": "PM_GCT_NOSLOT_IC_MISS", + "BriefDescription": "Gct empty for this thread due to Icache Miss", + "PublicDescription": "" + }, + { + "EventCode": "0x20a2", + "EventName": "PM_GCT_UTIL_11_14_ENTRIES", + "BriefDescription": "GCT Utilization 11-14 entries", + "PublicDescription": "" + }, + { + "EventCode": "0x20a4", + "EventName": "PM_GCT_UTIL_15_17_ENTRIES", + "BriefDescription": "GCT Utilization 15-17 entries", + "PublicDescription": "" + }, + { + "EventCode": "0x20a6", + "EventName": "PM_GCT_UTIL_18_ENTRIES", + "BriefDescription": "GCT Utilization 18+ entries", + "PublicDescription": "" + }, + { + "EventCode": "0x209c", + "EventName": "PM_GCT_UTIL_1_2_ENTRIES", + "BriefDescription": "GCT Utilization 1-2 entries", + "PublicDescription": "" + }, + { + "EventCode": "0x209e", + "EventName": "PM_GCT_UTIL_3_6_ENTRIES", + "BriefDescription": "GCT Utilization 3-6 entries", + "PublicDescription": "" + }, + { + "EventCode": "0x20a0", + "EventName": "PM_GCT_UTIL_7_10_ENTRIES", + "BriefDescription": "GCT Utilization 7-10 entries", + "PublicDescription": "" + }, + { + "EventCode": "0x1000a", + "EventName": "PM_GRP_BR_MPRED_NONSPEC", + "BriefDescription": "Group experienced non-speculative branch redirect", + "PublicDescription": "Group experienced Non-speculative br mispredicct" + }, + { + "EventCode": "0x30004", + "EventName": "PM_GRP_CMPL", + "BriefDescription": "group completed", + "PublicDescription": "" + }, + { + "EventCode": "0x3000a", + "EventName": "PM_GRP_DISP", + "BriefDescription": "group dispatch", + "PublicDescription": "dispatch_success (Group Dispatched)" + }, + { + "EventCode": "0x1000c", + "EventName": "PM_GRP_IC_MISS_NONSPEC", + "BriefDescription": "Group experienced non-speculative I cache miss", + "PublicDescription": "Group experi enced Non-specu lative I cache miss" + }, + { + "EventCode": 
"0x10130", + "EventName": "PM_GRP_MRK", + "BriefDescription": "Instruction Marked", + "PublicDescription": "Instruction marked in idu" + }, + { + "EventCode": "0x509c", + "EventName": "PM_GRP_NON_FULL_GROUP", + "BriefDescription": "GROUPs where we did not have 6 non branch instructions in the group(ST mode), in SMT mode 3 non branches", + "PublicDescription": "" + }, + { + "EventCode": "0x50a4", + "EventName": "PM_GRP_TERM_2ND_BRANCH", + "BriefDescription": "There were enough instructions in the Ibuffer, but 2nd branch ends group", + "PublicDescription": "" + }, + { + "EventCode": "0x50a6", + "EventName": "PM_GRP_TERM_FPU_AFTER_BR", + "BriefDescription": "There were enough instructions in the Ibuffer, but FPU OP IN same group after a branch terminates a group, cant do partial flushes", + "PublicDescription": "" + }, + { + "EventCode": "0x509e", + "EventName": "PM_GRP_TERM_NOINST", + "BriefDescription": "Do not fill every slot in the group, Not enough instructions in the Ibuffer. This includes cases where the group started with enough instructions, but some got knocked out by a cache miss or branch redirect (which would also empty the Ibuffer)", + "PublicDescription": "" + }, + { + "EventCode": "0x50a0", + "EventName": "PM_GRP_TERM_OTHER", + "BriefDescription": "There were enough instructions in the Ibuffer, but the group terminated early for some other reason, most likely due to a First or Last", + "PublicDescription": "" + }, + { + "EventCode": "0x50a2", + "EventName": "PM_GRP_TERM_SLOT_LIMIT", + "BriefDescription": "There were enough instructions in the Ibuffer, but 3 src RA/RB/RC , 2 way crack caused a group termination", + "PublicDescription": "" + }, + { + "EventCode": "0x4086", + "EventName": "PM_IBUF_FULL_CYC", + "BriefDescription": "Cycles No room in ibuff", + "PublicDescription": "Cycles No room in ibufffully qualified transfer (if5 valid)" + }, + { + "EventCode": "0x4098", + "EventName": "PM_IC_DEMAND_L2_BHT_REDIRECT", + "BriefDescription": "L2 I cache demand request due to BHT redirect, branch redirect ( 2 bubbles 3 cycles)", + "PublicDescription": "" + }, + { + "EventCode": "0x409a", + "EventName": "PM_IC_DEMAND_L2_BR_REDIRECT", + "BriefDescription": "L2 I cache demand request due to branch Mispredict ( 15 cycle path)", + "PublicDescription": "" + }, + { + "EventCode": "0x4088", + "EventName": "PM_IC_DEMAND_REQ", + "BriefDescription": "Demand Instruction fetch request", + "PublicDescription": "" + }, + { + "EventCode": "0x508a", + "EventName": "PM_IC_INVALIDATE", + "BriefDescription": "Ic line invalidated", + "PublicDescription": "" + }, + { + "EventCode": "0x4092", + "EventName": "PM_IC_PREF_CANCEL_HIT", + "BriefDescription": "Prefetch Canceled due to icache hit", + "PublicDescription": "" + }, + { + "EventCode": "0x4094", + "EventName": "PM_IC_PREF_CANCEL_L2", + "BriefDescription": "L2 Squashed request", + "PublicDescription": "" + }, + { + "EventCode": "0x4090", + "EventName": "PM_IC_PREF_CANCEL_PAGE", + "BriefDescription": "Prefetch Canceled due to page boundary", + "PublicDescription": "" + }, + { + "EventCode": "0x408a", + "EventName": "PM_IC_PREF_REQ", + "BriefDescription": "Instruction prefetch requests", + "PublicDescription": "" + }, + { + "EventCode": "0x408e", + "EventName": "PM_IC_PREF_WRITE", + "BriefDescription": "Instruction prefetch written into IL1", + "PublicDescription": "" + }, + { + "EventCode": "0x4096", + "EventName": "PM_IC_RELOAD_PRIVATE", + "BriefDescription": "Reloading line was brought in private for a specific thread. 
Most lines are brought in shared for all eight thrreads. If RA does not match then invalidates and then brings it shared to other thread. In P7 line brought in private , then line was invalidat", + "PublicDescription": "" + }, + { + "EventCode": "0x5088", + "EventName": "PM_IFU_L2_TOUCH", + "BriefDescription": "L2 touch to update MRU on a line", + "PublicDescription": "" + }, + { + "EventCode": "0x514050", + "EventName": "PM_INST_ALL_CHIP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for instruction fetches and prefetches", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch" + }, + { + "EventCode": "0x544048", + "EventName": "PM_INST_ALL_FROM_DL2L3_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x534048", + "EventName": "PM_INST_ALL_FROM_DL2L3_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x53404c", + "EventName": "PM_INST_ALL_FROM_DL4", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x54404c", + "EventName": "PM_INST_ALL_FROM_DMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x514042", + "EventName": "PM_INST_ALL_FROM_L2", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x544046", + "EventName": "PM_INST_ALL_FROM_L21_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from 
another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x534046", + "EventName": "PM_INST_ALL_FROM_L21_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x51404e", + "EventName": "PM_INST_ALL_FROM_L2MISS", + "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x534040", + "EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x544040", + "EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524040", + "EventName": "PM_INST_ALL_FROM_L2_MEPF", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. 
due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x514040", + "EventName": "PM_INST_ALL_FROM_L2_NO_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x544042", + "EventName": "PM_INST_ALL_FROM_L3", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x544044", + "EventName": "PM_INST_ALL_FROM_L31_ECO_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x534044", + "EventName": "PM_INST_ALL_FROM_L31_ECO_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524044", + "EventName": "PM_INST_ALL_FROM_L31_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x514046", + "EventName": "PM_INST_ALL_FROM_L31_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x54404e", + "EventName": "PM_INST_ALL_FROM_L3MISS_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to a instruction fetch", + "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x534042", + "EventName": "PM_INST_ALL_FROM_L3_DISP_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to instruction fetches and prefetches", + "PublicDescription": 
"The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524042", + "EventName": "PM_INST_ALL_FROM_L3_MEPF", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x514044", + "EventName": "PM_INST_ALL_FROM_L3_NO_CONFLICT", + "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x51404c", + "EventName": "PM_INST_ALL_FROM_LL4", + "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524048", + "EventName": "PM_INST_ALL_FROM_LMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x52404c", + "EventName": "PM_INST_ALL_FROM_MEMORY", + "BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x54404a", + "EventName": "PM_INST_ALL_FROM_OFF_CHIP_CACHE", + "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x514048", + "EventName": "PM_INST_ALL_FROM_ON_CHIP_CACHE", + "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524046", + "EventName": "PM_INST_ALL_FROM_RL2L3_MOD", + 
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x51404a", + "EventName": "PM_INST_ALL_FROM_RL2L3_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x52404a", + "EventName": "PM_INST_ALL_FROM_RL4", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x53404a", + "EventName": "PM_INST_ALL_FROM_RMEM", + "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to instruction fetches and prefetches", + "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x524050", + "EventName": "PM_INST_ALL_GRP_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for instruction fetches and prefetches", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch" + }, + { + "EventCode": "0x524052", + "EventName": "PM_INST_ALL_GRP_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for instruction fetches and prefetches", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro" + }, + { + "EventCode": "0x514052", + "EventName": "PM_INST_ALL_GRP_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for instruction fetches and prefetches", + "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch" + }, + { + "EventCode": "0x514054", + "EventName": "PM_INST_ALL_PUMP_CPRED", + "BriefDescription": "Pump prediction correct. Counts across all types of pumps for instruction fetches and prefetches", + "PublicDescription": "Pump prediction correct. 
Counts across all types of pumpsfor an instruction fetch" + }, + { + "EventCode": "0x544052", + "EventName": "PM_INST_ALL_PUMP_MPRED", + "BriefDescription": "Pump misprediction. Counts across all types of pumps for instruction fetches and prefetches", + "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch" + }, + { + "EventCode": "0x534050", + "EventName": "PM_INST_ALL_SYS_PUMP_CPRED", + "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for instruction fetches and prefetches", + "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch" + }, + { + "EventCode": "0x534052", + "EventName": "PM_INST_ALL_SYS_PUMP_MPRED", + "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for instruction fetches and prefetches", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or" + }, + { + "EventCode": "0x544050", + "EventName": "PM_INST_ALL_SYS_PUMP_MPRED_RTY", + "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for instruction fetches and prefetches", + "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch" + }, + { + "EventCode": "0x4080", + "EventName": "PM_INST_FROM_L1", + "BriefDescription": "Instruction fetches from L1", + "PublicDescription": "" + }, + { + "EventCode": "0x44046", + "EventName": "PM_INST_FROM_L21_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x34046", + "EventName": "PM_INST_FROM_L21_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x44044", + "EventName": "PM_INST_FROM_L31_ECO_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x34044", + "EventName": "PM_INST_FROM_L31_ECO_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction 
cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x24044", + "EventName": "PM_INST_FROM_L31_MOD", + "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x14046", + "EventName": "PM_INST_FROM_L31_SHR", + "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)", + "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1" + }, + { + "EventCode": "0x30016", + "EventName": "PM_INST_IMC_MATCH_DISP", + "BriefDescription": "Matched Instructions Dispatched", + "PublicDescription": "IMC Matches dispatched" + }, + { + "EventCode": "0x30014", + "EventName": "PM_IOPS_DISP", + "BriefDescription": "Internal Operations dispatched", + "PublicDescription": "IOPS dispatched" + }, + { + "EventCode": "0x45046", + "EventName": "PM_IPTEG_FROM_L21_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x35046", + "EventName": "PM_IPTEG_FROM_L21_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x35040", + "EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x45040", + "EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x45044", + "EventName": "PM_IPTEG_FROM_L31_ECO_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x35044", + "EventName": "PM_IPTEG_FROM_L31_ECO_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x25044", + "EventName": "PM_IPTEG_FROM_L31_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x15046", + "EventName": "PM_IPTEG_FROM_L31_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a 
instruction side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4608e", + "EventName": "PM_ISIDE_L2MEMACC", + "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem (or L4)", + "PublicDescription": "" + }, + { + "EventCode": "0x30ac", + "EventName": "PM_ISU_REF_FX0", + "BriefDescription": "FX0 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x30ae", + "EventName": "PM_ISU_REF_FX1", + "BriefDescription": "FX1 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x38ac", + "EventName": "PM_ISU_REF_FXU", + "BriefDescription": "FXU ISU reject from either pipe", + "PublicDescription": "" + }, + { + "EventCode": "0x30b0", + "EventName": "PM_ISU_REF_LS0", + "BriefDescription": "LS0 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x30b2", + "EventName": "PM_ISU_REF_LS1", + "BriefDescription": "LS1 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x30b4", + "EventName": "PM_ISU_REF_LS2", + "BriefDescription": "LS2 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x30b6", + "EventName": "PM_ISU_REF_LS3", + "BriefDescription": "LS3 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x309c", + "EventName": "PM_ISU_REJECTS_ALL", + "BriefDescription": "All ISU rejects; could be more than 1 per cycle", + "PublicDescription": "" + }, + { + "EventCode": "0x30a2", + "EventName": "PM_ISU_REJECT_RES_NA", + "BriefDescription": "ISU reject due to resource not available", + "PublicDescription": "" + }, + { + "EventCode": "0x309e", + "EventName": "PM_ISU_REJECT_SAR_BYPASS", + "BriefDescription": "Reject because of SAR bypass", + "PublicDescription": "" + }, + { + "EventCode": "0x30a0", + "EventName": "PM_ISU_REJECT_SRC_NA", + "BriefDescription": "ISU reject due to source not available", + "PublicDescription": "" + }, + { + "EventCode": "0x30a8", + "EventName": "PM_ISU_REJ_VS0", + "BriefDescription": "VS0 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x30aa", + "EventName": "PM_ISU_REJ_VS1", + "BriefDescription": "VS1 ISU reject", + "PublicDescription": "" + }, + { + "EventCode": "0x38a8", + "EventName": "PM_ISU_REJ_VSU", + "BriefDescription": "VSU ISU reject from either pipe", + "PublicDescription": "" + }, + { + "EventCode": "0x30b8", + "EventName": "PM_ISYNC", + "BriefDescription": "Isync count per thread", + "PublicDescription": "" + }, + { + "EventCode": "0x200301ea", + "EventName": "PM_L1MISS_LAT_EXC_1024", + "BriefDescription": "L1 misses that took longer than 1024 cycles to resolve (miss to reload)", + "PublicDescription": "Reload latency exceeded 1024 cyc" + }, + { + "EventCode": "0x200401ec", + "EventName": "PM_L1MISS_LAT_EXC_2048", + "BriefDescription": "L1 misses that took longer than 2048 cycles to resolve (miss to reload)", + "PublicDescription": "Reload latency exceeded 2048 cyc" + }, + { + "EventCode": "0x200101e8", + "EventName": "PM_L1MISS_LAT_EXC_256", + "BriefDescription": "L1 misses that took longer than 256 cycles to resolve (miss to reload)", + "PublicDescription": "Reload latency exceeded 256 cyc" + }, + { + "EventCode": "0x200201e6", + "EventName": "PM_L1MISS_LAT_EXC_32", + "BriefDescription": "L1 misses that took longer than 32 cycles to resolve (miss to reload)", + "PublicDescription": "Reload latency exceeded 32 cyc" + }, + { + "EventCode": "0x26086", + "EventName": "PM_L1PF_L2MEMACC", + "BriefDescription": "valid when first beat of data comes in for an L1pref where data came from mem (or L4)", + "PublicDescription": "" + }, +
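
The four PM_L1MISS_LAT_EXC_* events defined just above are cumulative threshold counters: each counts L1 misses whose miss-to-reload latency exceeded its threshold, so adjacent thresholds can be differenced into a coarse latency histogram. A minimal sketch in Python, not part of the patch, using hypothetical counter values and assuming only the "took longer than N cycles" semantics given in the BriefDescription text:

    # Hypothetical raw counts for the threshold events defined above.
    counts = {
        "PM_L1MISS_LAT_EXC_32": 120_000,    # misses slower than 32 cycles
        "PM_L1MISS_LAT_EXC_256": 30_000,    # misses slower than 256 cycles
        "PM_L1MISS_LAT_EXC_1024": 4_000,    # misses slower than 1024 cycles
        "PM_L1MISS_LAT_EXC_2048": 500,      # misses slower than 2048 cycles
    }

    # Each counter is cumulative over its threshold, so differencing adjacent
    # thresholds yields the number of misses falling in each latency band.
    buckets = {
        "32-256 cycles": counts["PM_L1MISS_LAT_EXC_32"] - counts["PM_L1MISS_LAT_EXC_256"],
        "256-1024 cycles": counts["PM_L1MISS_LAT_EXC_256"] - counts["PM_L1MISS_LAT_EXC_1024"],
        "1024-2048 cycles": counts["PM_L1MISS_LAT_EXC_1024"] - counts["PM_L1MISS_LAT_EXC_2048"],
        ">2048 cycles": counts["PM_L1MISS_LAT_EXC_2048"],
    }

    for label, n in buckets.items():
        print(f"{label}: {n}")

In practice the raw counts would come from a perf measurement that selects these events by the names defined in this file.
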
{ + "EventCode": "0x408c", + "EventName": "PM_L1_DEMAND_WRITE", + "BriefDescription": "Instruction Demand sectors wriittent into IL1", + "PublicDescription": "" + }, + { + "EventCode": "0x27084", + "EventName": "PM_L2_CHIP_PUMP", + "BriefDescription": "RC requests that were local on chip pump attempts", + "PublicDescription": "" + }, + { + "EventCode": "0x27086", + "EventName": "PM_L2_GROUP_PUMP", + "BriefDescription": "RC requests that were on Node Pump attempts", + "PublicDescription": "" + }, + { + "EventCode": "0x3708a", + "EventName": "PM_L2_RTY_ST", + "BriefDescription": "RC retries on PB for any store from core", + "PublicDescription": "" + }, + { + "EventCode": "0x17080", + "EventName": "PM_L2_ST", + "BriefDescription": "All successful D-side store dispatches for this thread", + "PublicDescription": "" + }, + { + "EventCode": "0x17082", + "EventName": "PM_L2_ST_MISS", + "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss", + "PublicDescription": "" + }, + { + "EventCode": "0x1e05e", + "EventName": "PM_L2_TM_REQ_ABORT", + "BriefDescription": "TM abort", + "PublicDescription": "" + }, + { + "EventCode": "0x3e05c", + "EventName": "PM_L2_TM_ST_ABORT_SISTER", + "BriefDescription": "TM marked store abort", + "PublicDescription": "" + }, + { + "EventCode": "0x819082", + "EventName": "PM_L3_CI_USAGE", + "BriefDescription": "rotating sample of 16 CI or CO actives", + "PublicDescription": "" + }, + { + "EventCode": "0x83908b", + "EventName": "PM_L3_CO0_ALLOC", + "BriefDescription": "lifetime, sample of CO machine 0 valid", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x83908a", + "EventName": "PM_L3_CO0_BUSY", + "BriefDescription": "lifetime, sample of CO machine 0 valid", + "PublicDescription": "" + }, + { + "EventCode": "0x28086", + "EventName": "PM_L3_CO_L31", + "BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)", + "PublicDescription": "" + }, + { + "EventCode": "0x28084", + "EventName": "PM_L3_CO_MEM", + "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)", + "PublicDescription": "" + }, + { + "EventCode": "0x1e052", + "EventName": "PM_L3_LD_PREF", + "BriefDescription": "L3 Load Prefetches", + "PublicDescription": "" + }, + { + "EventCode": "0x84908d", + "EventName": "PM_L3_PF0_ALLOC", + "BriefDescription": "lifetime, sample of PF machine 0 valid", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x84908c", + "EventName": "PM_L3_PF0_BUSY", + "BriefDescription": "lifetime, sample of PF machine 0 valid", + "PublicDescription": "" + }, + { + "EventCode": "0x18080", + "EventName": "PM_L3_PF_MISS_L3", + "BriefDescription": "L3 Prefetch missed in L3", + "PublicDescription": "" + }, + { + "EventCode": "0x3808a", + "EventName": "PM_L3_PF_OFF_CHIP_CACHE", + "BriefDescription": "L3 Prefetch from Off chip cache", + "PublicDescription": "" + }, + { + "EventCode": "0x4808e", + "EventName": "PM_L3_PF_OFF_CHIP_MEM", + "BriefDescription": "L3 Prefetch from Off chip memory", + "PublicDescription": "" + }, + { + "EventCode": "0x38088", + "EventName": "PM_L3_PF_ON_CHIP_CACHE", + "BriefDescription": "L3 Prefetch from On chip cache", + "PublicDescription": "" + }, + { + "EventCode": "0x4808c", + "EventName": "PM_L3_PF_ON_CHIP_MEM", + "BriefDescription": "L3 Prefetch from On chip memory", + "PublicDescription": "" + }, + { + "EventCode": "0x829084", + "EventName": "PM_L3_PF_USAGE", + "BriefDescription": "rotating sample of 32 PF actives", + "PublicDescription": "" + }, + { + "EventCode": "0x4e052", + "EventName": 
"PM_L3_PREF_ALL", + "BriefDescription": "Total HW L3 prefetches(Load+store)", + "PublicDescription": "" + }, + { + "EventCode": "0x84908f", + "EventName": "PM_L3_RD0_ALLOC", + "BriefDescription": "lifetime, sample of RD machine 0 valid", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x84908e", + "EventName": "PM_L3_RD0_BUSY", + "BriefDescription": "lifetime, sample of RD machine 0 valid", + "PublicDescription": "" + }, + { + "EventCode": "0x829086", + "EventName": "PM_L3_RD_USAGE", + "BriefDescription": "rotating sample of 16 RD actives", + "PublicDescription": "" + }, + { + "EventCode": "0x839089", + "EventName": "PM_L3_SN0_ALLOC", + "BriefDescription": "lifetime, sample of snooper machine 0 valid", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x839088", + "EventName": "PM_L3_SN0_BUSY", + "BriefDescription": "lifetime, sample of snooper machine 0 valid", + "PublicDescription": "" + }, + { + "EventCode": "0x819080", + "EventName": "PM_L3_SN_USAGE", + "BriefDescription": "rotating sample of 8 snoop valids", + "PublicDescription": "" + }, + { + "EventCode": "0x2e052", + "EventName": "PM_L3_ST_PREF", + "BriefDescription": "L3 store Prefetches", + "PublicDescription": "" + }, + { + "EventCode": "0x3e052", + "EventName": "PM_L3_SW_PREF", + "BriefDescription": "Data stream touchto L3", + "PublicDescription": "" + }, + { + "EventCode": "0x18081", + "EventName": "PM_L3_WI0_ALLOC", + "BriefDescription": "lifetime, sample of Write Inject machine 0 valid", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xc080", + "EventName": "PM_LD_REF_L1_LSU0", + "BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject", + "PublicDescription": "LS0 L1 D cache load references counted at finish, gated by rejectLSU0 L1 D cache load references" + }, + { + "EventCode": "0xc082", + "EventName": "PM_LD_REF_L1_LSU1", + "BriefDescription": "LS1 L1 D cache load references counted at finish, gated by reject", + "PublicDescription": "LS1 L1 D cache load references counted at finish, gated by rejectLSU1 L1 D cache load references" + }, + { + "EventCode": "0xc094", + "EventName": "PM_LD_REF_L1_LSU2", + "BriefDescription": "LS2 L1 D cache load references counted at finish, gated by reject", + "PublicDescription": "LS2 L1 D cache load references counted at finish, gated by reject42" + }, + { + "EventCode": "0xc096", + "EventName": "PM_LD_REF_L1_LSU3", + "BriefDescription": "LS3 L1 D cache load references counted at finish, gated by reject", + "PublicDescription": "LS3 L1 D cache load references counted at finish, gated by reject42" + }, + { + "EventCode": "0x509a", + "EventName": "PM_LINK_STACK_INVALID_PTR", + "BriefDescription": "A flush were LS ptr is invalid, results in a pop , A lot of interrupts between push and pops", + "PublicDescription": "" + }, + { + "EventCode": "0x5098", + "EventName": "PM_LINK_STACK_WRONG_ADD_PRED", + "BriefDescription": "Link stack predicts wrong address, because of link stack design limitation", + "PublicDescription": "" + }, + { + "EventCode": "0xe080", + "EventName": "PM_LS0_ERAT_MISS_PREF", + "BriefDescription": "LS0 Erat miss due to prefetch", + "PublicDescription": "LS0 Erat miss due to prefetch42" + }, + { + "EventCode": "0xd0b8", + "EventName": "PM_LS0_L1_PREF", + "BriefDescription": "LS0 L1 cache data prefetches", + "PublicDescription": "LS0 L1 cache data prefetches42" + }, + { + "EventCode": "0xc098", + "EventName": "PM_LS0_L1_SW_PREF", + "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches", + 
"PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42" + }, + { + "EventCode": "0xe082", + "EventName": "PM_LS1_ERAT_MISS_PREF", + "BriefDescription": "LS1 Erat miss due to prefetch", + "PublicDescription": "LS1 Erat miss due to prefetch42" + }, + { + "EventCode": "0xd0ba", + "EventName": "PM_LS1_L1_PREF", + "BriefDescription": "LS1 L1 cache data prefetches", + "PublicDescription": "LS1 L1 cache data prefetches42" + }, + { + "EventCode": "0xc09a", + "EventName": "PM_LS1_L1_SW_PREF", + "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches", + "PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42" + }, + { + "EventCode": "0xc0b0", + "EventName": "PM_LSU0_FLUSH_LRQ", + "BriefDescription": "LS0 Flush: LRQ", + "PublicDescription": "LS0 Flush: LRQLSU0 LRQ flushes" + }, + { + "EventCode": "0xc0b8", + "EventName": "PM_LSU0_FLUSH_SRQ", + "BriefDescription": "LS0 Flush: SRQ", + "PublicDescription": "LS0 Flush: SRQLSU0 SRQ lhs flushes" + }, + { + "EventCode": "0xc0a4", + "EventName": "PM_LSU0_FLUSH_ULD", + "BriefDescription": "LS0 Flush: Unaligned Load", + "PublicDescription": "LS0 Flush: Unaligned LoadLSU0 unaligned load flushes" + }, + { + "EventCode": "0xc0ac", + "EventName": "PM_LSU0_FLUSH_UST", + "BriefDescription": "LS0 Flush: Unaligned Store", + "PublicDescription": "LS0 Flush: Unaligned StoreLSU0 unaligned store flushes" + }, + { + "EventCode": "0xf088", + "EventName": "PM_LSU0_L1_CAM_CANCEL", + "BriefDescription": "ls0 l1 tm cam cancel", + "PublicDescription": "ls0 l1 tm cam cancel42" + }, + { + "EventCode": "0x1e056", + "EventName": "PM_LSU0_LARX_FIN", + "BriefDescription": "Larx finished in LSU pipe0", + "PublicDescription": "" + }, + { + "EventCode": "0xd08c", + "EventName": "PM_LSU0_LMQ_LHR_MERGE", + "BriefDescription": "LS0 Load Merged with another cacheline request", + "PublicDescription": "LS0 Load Merged with another cacheline request42" + }, + { + "EventCode": "0xc08c", + "EventName": "PM_LSU0_NCLD", + "BriefDescription": "LS0 Non-cachable Loads counted at finish", + "PublicDescription": "LS0 Non-cachable Loads counted at finishLSU0 non-cacheable loads" + }, + { + "EventCode": "0xe090", + "EventName": "PM_LSU0_PRIMARY_ERAT_HIT", + "BriefDescription": "Primary ERAT hit", + "PublicDescription": "Primary ERAT hit42" + }, + { + "EventCode": "0x1e05a", + "EventName": "PM_LSU0_REJECT", + "BriefDescription": "LSU0 reject", + "PublicDescription": "" + }, + { + "EventCode": "0xc09c", + "EventName": "PM_LSU0_SRQ_STFWD", + "BriefDescription": "LS0 SRQ forwarded data to a load", + "PublicDescription": "LS0 SRQ forwarded data to a loadLSU0 SRQ store forwarded" + }, + { + "EventCode": "0xf084", + "EventName": "PM_LSU0_STORE_REJECT", + "BriefDescription": "ls0 store reject", + "PublicDescription": "ls0 store reject42" + }, + { + "EventCode": "0xe0a8", + "EventName": "PM_LSU0_TMA_REQ_L2", + "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding", + "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42" + }, + { + "EventCode": "0xe098", + "EventName": "PM_LSU0_TM_L1_HIT", + "BriefDescription": "Load tm hit in L1", + "PublicDescription": "Load tm hit in L142" + }, + { + "EventCode": "0xe0a0", + "EventName": "PM_LSU0_TM_L1_MISS", + "BriefDescription": "Load tm L1 miss", + "PublicDescription": "Load tm L1 miss42" + }, + { + "EventCode": "0xc0b2", + "EventName": "PM_LSU1_FLUSH_LRQ", + "BriefDescription": 
"LS1 Flush: LRQ", + "PublicDescription": "LS1 Flush: LRQLSU1 LRQ flushes" + }, + { + "EventCode": "0xc0ba", + "EventName": "PM_LSU1_FLUSH_SRQ", + "BriefDescription": "LS1 Flush: SRQ", + "PublicDescription": "LS1 Flush: SRQLSU1 SRQ lhs flushes" + }, + { + "EventCode": "0xc0a6", + "EventName": "PM_LSU1_FLUSH_ULD", + "BriefDescription": "LS 1 Flush: Unaligned Load", + "PublicDescription": "LS 1 Flush: Unaligned LoadLSU1 unaligned load flushes" + }, + { + "EventCode": "0xc0ae", + "EventName": "PM_LSU1_FLUSH_UST", + "BriefDescription": "LS1 Flush: Unaligned Store", + "PublicDescription": "LS1 Flush: Unaligned StoreLSU1 unaligned store flushes" + }, + { + "EventCode": "0xf08a", + "EventName": "PM_LSU1_L1_CAM_CANCEL", + "BriefDescription": "ls1 l1 tm cam cancel", + "PublicDescription": "ls1 l1 tm cam cancel42" + }, + { + "EventCode": "0x2e056", + "EventName": "PM_LSU1_LARX_FIN", + "BriefDescription": "Larx finished in LSU pipe1", + "PublicDescription": "" + }, + { + "EventCode": "0xd08e", + "EventName": "PM_LSU1_LMQ_LHR_MERGE", + "BriefDescription": "LS1 Load Merge with another cacheline request", + "PublicDescription": "LS1 Load Merge with another cacheline request42" + }, + { + "EventCode": "0xc08e", + "EventName": "PM_LSU1_NCLD", + "BriefDescription": "LS1 Non-cachable Loads counted at finish", + "PublicDescription": "LS1 Non-cachable Loads counted at finishLSU1 non-cacheable loads" + }, + { + "EventCode": "0xe092", + "EventName": "PM_LSU1_PRIMARY_ERAT_HIT", + "BriefDescription": "Primary ERAT hit", + "PublicDescription": "Primary ERAT hit42" + }, + { + "EventCode": "0x2e05a", + "EventName": "PM_LSU1_REJECT", + "BriefDescription": "LSU1 reject", + "PublicDescription": "" + }, + { + "EventCode": "0xc09e", + "EventName": "PM_LSU1_SRQ_STFWD", + "BriefDescription": "LS1 SRQ forwarded data to a load", + "PublicDescription": "LS1 SRQ forwarded data to a loadLSU1 SRQ store forwarded" + }, + { + "EventCode": "0xf086", + "EventName": "PM_LSU1_STORE_REJECT", + "BriefDescription": "ls1 store reject", + "PublicDescription": "ls1 store reject42" + }, + { + "EventCode": "0xe0aa", + "EventName": "PM_LSU1_TMA_REQ_L2", + "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding", + "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42" + }, + { + "EventCode": "0xe09a", + "EventName": "PM_LSU1_TM_L1_HIT", + "BriefDescription": "Load tm hit in L1", + "PublicDescription": "Load tm hit in L142" + }, + { + "EventCode": "0xe0a2", + "EventName": "PM_LSU1_TM_L1_MISS", + "BriefDescription": "Load tm L1 miss", + "PublicDescription": "Load tm L1 miss42" + }, + { + "EventCode": "0xc0b4", + "EventName": "PM_LSU2_FLUSH_LRQ", + "BriefDescription": "LS02Flush: LRQ", + "PublicDescription": "LS02Flush: LRQ42" + }, + { + "EventCode": "0xc0bc", + "EventName": "PM_LSU2_FLUSH_SRQ", + "BriefDescription": "LS2 Flush: SRQ", + "PublicDescription": "LS2 Flush: SRQ42" + }, + { + "EventCode": "0xc0a8", + "EventName": "PM_LSU2_FLUSH_ULD", + "BriefDescription": "LS3 Flush: Unaligned Load", + "PublicDescription": "LS3 Flush: Unaligned Load42" + }, + { + "EventCode": "0xf08c", + "EventName": "PM_LSU2_L1_CAM_CANCEL", + "BriefDescription": "ls2 l1 tm cam cancel", + "PublicDescription": "ls2 l1 tm cam cancel42" + }, + { + "EventCode": "0x3e056", + "EventName": "PM_LSU2_LARX_FIN", + "BriefDescription": "Larx finished in LSU pipe2", + "PublicDescription": "" + }, + { + "EventCode": "0xc084", + "EventName": "PM_LSU2_LDF", + 
"BriefDescription": "LS2 Scalar Loads", + "PublicDescription": "LS2 Scalar Loads42" + }, + { + "EventCode": "0xc088", + "EventName": "PM_LSU2_LDX", + "BriefDescription": "LS0 Vector Loads", + "PublicDescription": "LS0 Vector Loads42" + }, + { + "EventCode": "0xd090", + "EventName": "PM_LSU2_LMQ_LHR_MERGE", + "BriefDescription": "LS0 Load Merged with another cacheline request", + "PublicDescription": "LS0 Load Merged with another cacheline request42" + }, + { + "EventCode": "0xe094", + "EventName": "PM_LSU2_PRIMARY_ERAT_HIT", + "BriefDescription": "Primary ERAT hit", + "PublicDescription": "Primary ERAT hit42" + }, + { + "EventCode": "0x3e05a", + "EventName": "PM_LSU2_REJECT", + "BriefDescription": "LSU2 reject", + "PublicDescription": "" + }, + { + "EventCode": "0xc0a0", + "EventName": "PM_LSU2_SRQ_STFWD", + "BriefDescription": "LS2 SRQ forwarded data to a load", + "PublicDescription": "LS2 SRQ forwarded data to a load42" + }, + { + "EventCode": "0xe0ac", + "EventName": "PM_LSU2_TMA_REQ_L2", + "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding", + "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42" + }, + { + "EventCode": "0xe09c", + "EventName": "PM_LSU2_TM_L1_HIT", + "BriefDescription": "Load tm hit in L1", + "PublicDescription": "Load tm hit in L142" + }, + { + "EventCode": "0xe0a4", + "EventName": "PM_LSU2_TM_L1_MISS", + "BriefDescription": "Load tm L1 miss", + "PublicDescription": "Load tm L1 miss42" + }, + { + "EventCode": "0xc0b6", + "EventName": "PM_LSU3_FLUSH_LRQ", + "BriefDescription": "LS3 Flush: LRQ", + "PublicDescription": "LS3 Flush: LRQ42" + }, + { + "EventCode": "0xc0be", + "EventName": "PM_LSU3_FLUSH_SRQ", + "BriefDescription": "LS13 Flush: SRQ", + "PublicDescription": "LS13 Flush: SRQ42" + }, + { + "EventCode": "0xc0aa", + "EventName": "PM_LSU3_FLUSH_ULD", + "BriefDescription": "LS 14Flush: Unaligned Load", + "PublicDescription": "LS 14Flush: Unaligned Load42" + }, + { + "EventCode": "0xf08e", + "EventName": "PM_LSU3_L1_CAM_CANCEL", + "BriefDescription": "ls3 l1 tm cam cancel", + "PublicDescription": "ls3 l1 tm cam cancel42" + }, + { + "EventCode": "0x4e056", + "EventName": "PM_LSU3_LARX_FIN", + "BriefDescription": "Larx finished in LSU pipe3", + "PublicDescription": "" + }, + { + "EventCode": "0xc086", + "EventName": "PM_LSU3_LDF", + "BriefDescription": "LS3 Scalar Loads", + "PublicDescription": "LS3 Scalar Loads 42" + }, + { + "EventCode": "0xc08a", + "EventName": "PM_LSU3_LDX", + "BriefDescription": "LS1 Vector Loads", + "PublicDescription": "LS1 Vector Loads42" + }, + { + "EventCode": "0xd092", + "EventName": "PM_LSU3_LMQ_LHR_MERGE", + "BriefDescription": "LS1 Load Merge with another cacheline request", + "PublicDescription": "LS1 Load Merge with another cacheline request42" + }, + { + "EventCode": "0xe096", + "EventName": "PM_LSU3_PRIMARY_ERAT_HIT", + "BriefDescription": "Primary ERAT hit", + "PublicDescription": "Primary ERAT hit42" + }, + { + "EventCode": "0x4e05a", + "EventName": "PM_LSU3_REJECT", + "BriefDescription": "LSU3 reject", + "PublicDescription": "" + }, + { + "EventCode": "0xc0a2", + "EventName": "PM_LSU3_SRQ_STFWD", + "BriefDescription": "LS3 SRQ forwarded data to a load", + "PublicDescription": "LS3 SRQ forwarded data to a load42" + }, + { + "EventCode": "0xe0ae", + "EventName": "PM_LSU3_TMA_REQ_L2", + "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding", + 
"PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42" + }, + { + "EventCode": "0xe09e", + "EventName": "PM_LSU3_TM_L1_HIT", + "BriefDescription": "Load tm hit in L1", + "PublicDescription": "Load tm hit in L142" + }, + { + "EventCode": "0xe0a6", + "EventName": "PM_LSU3_TM_L1_MISS", + "BriefDescription": "Load tm L1 miss", + "PublicDescription": "Load tm L1 miss42" + }, + { + "EventCode": "0xe880", + "EventName": "PM_LSU_ERAT_MISS_PREF", + "BriefDescription": "Erat miss due to prefetch, on either pipe", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xc8ac", + "EventName": "PM_LSU_FLUSH_UST", + "BriefDescription": "Unaligned Store Flush on either pipe", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xd0a4", + "EventName": "PM_LSU_FOUR_TABLEWALK_CYC", + "BriefDescription": "Cycles when four tablewalks pending on this thread", + "PublicDescription": "Cycles when four tablewalks pending on this thread42" + }, + { + "EventCode": "0x10066", + "EventName": "PM_LSU_FX_FIN", + "BriefDescription": "LSU Finished a FX operation (up to 2 per cycle", + "PublicDescription": "" + }, + { + "EventCode": "0xd8b8", + "EventName": "PM_LSU_L1_PREF", + "BriefDescription": "hw initiated , include sw streaming forms as well , include sw streams as a separate event", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xc898", + "EventName": "PM_LSU_L1_SW_PREF", + "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches, on both pipes", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xc884", + "EventName": "PM_LSU_LDF", + "BriefDescription": "FPU loads only on LS2/LS3 ie LU0/LU1", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xc888", + "EventName": "PM_LSU_LDX", + "BriefDescription": "Vector loads can issue only on LS2/LS3", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xd0a2", + "EventName": "PM_LSU_LMQ_FULL_CYC", + "BriefDescription": "LMQ full", + "PublicDescription": "LMQ fullCycles LMQ full" + }, + { + "EventCode": "0xd0a1", + "EventName": "PM_LSU_LMQ_S0_ALLOC", + "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xd0a0", + "EventName": "PM_LSU_LMQ_S0_VALID", + "BriefDescription": "Slot 0 of LMQ valid", + "PublicDescription": "Slot 0 of LMQ validLMQ slot 0 valid" + }, + { + "EventCode": "0x3001c", + "EventName": "PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC", + "BriefDescription": "ALL threads lsu empty (lmq and srq empty)", + "PublicDescription": "ALL threads lsu empty (lmq and srq empty). Issue HW016541" + }, + { + "EventCode": "0xd09f", + "EventName": "PM_LSU_LRQ_S0_ALLOC", + "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. 
Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xd09e", + "EventName": "PM_LSU_LRQ_S0_VALID", + "BriefDescription": "Slot 0 of LRQ valid", + "PublicDescription": "Slot 0 of LRQ validLRQ slot 0 valid" + }, + { + "EventCode": "0xf091", + "EventName": "PM_LSU_LRQ_S43_ALLOC", + "BriefDescription": "LRQ slot 43 was released", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xf090", + "EventName": "PM_LSU_LRQ_S43_VALID", + "BriefDescription": "LRQ slot 43 was busy", + "PublicDescription": "LRQ slot 43 was busy42" + }, + { + "EventCode": "0x30162", + "EventName": "PM_LSU_MRK_DERAT_MISS", + "BriefDescription": "DERAT Reloaded (Miss)", + "PublicDescription": "" + }, + { + "EventCode": "0xc88c", + "EventName": "PM_LSU_NCLD", + "BriefDescription": "count at finish so can return only on ls0 or ls1", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xc092", + "EventName": "PM_LSU_NCST", + "BriefDescription": "Non-cachable Stores sent to nest", + "PublicDescription": "Non-cachable Stores sent to nest42" + }, + { + "EventCode": "0x10064", + "EventName": "PM_LSU_REJECT", + "BriefDescription": "LSU Reject (up to 4 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0xd082", + "EventName": "PM_LSU_SET_MPRED", + "BriefDescription": "Line already in cache at reload time", + "PublicDescription": "Line already in cache at reload time42" + }, + { + "EventCode": "0x40008", + "EventName": "PM_LSU_SRQ_EMPTY_CYC", + "BriefDescription": "ALL threads srq empty", + "PublicDescription": "All threads srq empty" + }, + { + "EventCode": "0xd09d", + "EventName": "PM_LSU_SRQ_S0_ALLOC", + "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. 
Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xd09c", + "EventName": "PM_LSU_SRQ_S0_VALID", + "BriefDescription": "Slot 0 of SRQ valid", + "PublicDescription": "Slot 0 of SRQ validSRQ slot 0 valid" + }, + { + "EventCode": "0xf093", + "EventName": "PM_LSU_SRQ_S39_ALLOC", + "BriefDescription": "SRQ slot 39 was released", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xf092", + "EventName": "PM_LSU_SRQ_S39_VALID", + "BriefDescription": "SRQ slot 39 was busy", + "PublicDescription": "SRQ slot 39 was busy42" + }, + { + "EventCode": "0xd09b", + "EventName": "PM_LSU_SRQ_SYNC", + "BriefDescription": "A sync in the SRQ ended", + "PublicDescription": "0.0" + }, + { + "EventCode": "0xd09a", + "EventName": "PM_LSU_SRQ_SYNC_CYC", + "BriefDescription": "A sync is in the SRQ (edge detect to count)", + "PublicDescription": "A sync is in the SRQ (edge detect to count)SRQ sync duration" + }, + { + "EventCode": "0xf084", + "EventName": "PM_LSU_STORE_REJECT", + "BriefDescription": "Store reject on either pipe", + "PublicDescription": "LSU" + }, + { + "EventCode": "0xd0a6", + "EventName": "PM_LSU_TWO_TABLEWALK_CYC", + "BriefDescription": "Cycles when two tablewalks pending on this thread", + "PublicDescription": "Cycles when two tablewalks pending on this thread42" + }, + { + "EventCode": "0x5094", + "EventName": "PM_LWSYNC", + "BriefDescription": "threaded version, IC Misses where we got EA dir hit but no sector valids were on. ICBI took line out", + "PublicDescription": "" + }, + { + "EventCode": "0x209a", + "EventName": "PM_LWSYNC_HELD", + "BriefDescription": "LWSYNC held at dispatch", + "PublicDescription": "" + }, + { + "EventCode": "0x3013a", + "EventName": "PM_MRK_CRU_FIN", + "BriefDescription": "IFU non-branch finished", + "PublicDescription": "IFU non-branch marked instruction finished" + }, + { + "EventCode": "0x4d146", + "EventName": "PM_MRK_DATA_FROM_L21_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d126", + "EventName": "PM_MRK_DATA_FROM_L21_MOD_CYC", + "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L2 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d146", + "EventName": "PM_MRK_DATA_FROM_L21_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c126", + "EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC", + "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d144", + "EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d124", + "EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD_CYC", + "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x3d144", + "EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR", 
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2c124", + "EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR_CYC", + "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's ECO L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x2d144", + "EventName": "PM_MRK_DATA_FROM_L31_MOD", + "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4d124", + "EventName": "PM_MRK_DATA_FROM_L31_MOD_CYC", + "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x1d146", + "EventName": "PM_MRK_DATA_FROM_L31_SHR", + "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4c126", + "EventName": "PM_MRK_DATA_FROM_L31_SHR_CYC", + "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L3 on the same chip due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x201e0", + "EventName": "PM_MRK_DATA_FROM_MEM", + "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load", + "PublicDescription": "" + }, + { + "EventCode": "0x4f146", + "EventName": "PM_MRK_DPTEG_FROM_L21_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f146", + "EventName": "PM_MRK_DPTEG_FROM_L21_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f140", + "EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f140", + "EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_OTHER", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4f144", + "EventName": "PM_MRK_DPTEG_FROM_L31_ECO_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3f144", + "EventName": "PM_MRK_DPTEG_FROM_L31_ECO_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2f144", + "EventName": "PM_MRK_DPTEG_FROM_L31_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a marked data side request", + 
"PublicDescription": "" + }, + { + "EventCode": "0x1f146", + "EventName": "PM_MRK_DPTEG_FROM_L31_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a marked data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x30156", + "EventName": "PM_MRK_FAB_RSP_MATCH", + "BriefDescription": "ttype and cresp matched as specified in MMCR1", + "PublicDescription": "" + }, + { + "EventCode": "0x4f152", + "EventName": "PM_MRK_FAB_RSP_MATCH_CYC", + "BriefDescription": "cresp/ttype match cycles", + "PublicDescription": "" + }, + { + "EventCode": "0x2013c", + "EventName": "PM_MRK_FILT_MATCH", + "BriefDescription": "Marked filter Match", + "PublicDescription": "" + }, + { + "EventCode": "0x1013c", + "EventName": "PM_MRK_FIN_STALL_CYC", + "BriefDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count )", + "PublicDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count #)" + }, + { + "EventCode": "0x40130", + "EventName": "PM_MRK_GRP_CMPL", + "BriefDescription": "marked instruction finished (completed)", + "PublicDescription": "" + }, + { + "EventCode": "0x4013a", + "EventName": "PM_MRK_GRP_IC_MISS", + "BriefDescription": "Marked Group experienced I cache miss", + "PublicDescription": "" + }, + { + "EventCode": "0x3013c", + "EventName": "PM_MRK_GRP_NTC", + "BriefDescription": "Marked group ntc cycles", + "PublicDescription": "" + }, + { + "EventCode": "0x1013f", + "EventName": "PM_MRK_LD_MISS_EXPOSED", + "BriefDescription": "Marked Load exposed Miss (exposed period ended)", + "PublicDescription": "Marked Load exposed Miss (use edge detect to count #)" + }, + { + "EventCode": "0xd180", + "EventName": "PM_MRK_LSU_FLUSH", + "BriefDescription": "Flush: (marked) : All Cases", + "PublicDescription": "Flush: (marked) : All Cases42" + }, + { + "EventCode": "0xd188", + "EventName": "PM_MRK_LSU_FLUSH_LRQ", + "BriefDescription": "Flush: (marked) LRQ", + "PublicDescription": "Flush: (marked) LRQMarked LRQ flushes" + }, + { + "EventCode": "0xd18a", + "EventName": "PM_MRK_LSU_FLUSH_SRQ", + "BriefDescription": "Flush: (marked) SRQ", + "PublicDescription": "Flush: (marked) SRQMarked SRQ lhs flushes" + }, + { + "EventCode": "0xd184", + "EventName": "PM_MRK_LSU_FLUSH_ULD", + "BriefDescription": "Flush: (marked) Unaligned Load", + "PublicDescription": "Flush: (marked) Unaligned LoadMarked unaligned load flushes" + }, + { + "EventCode": "0xd186", + "EventName": "PM_MRK_LSU_FLUSH_UST", + "BriefDescription": "Flush: (marked) Unaligned Store", + "PublicDescription": "Flush: (marked) Unaligned StoreMarked unaligned store flushes" + }, + { + "EventCode": "0x40164", + "EventName": "PM_MRK_LSU_REJECT", + "BriefDescription": "LSU marked reject (up to 2 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0x30164", + "EventName": "PM_MRK_LSU_REJECT_ERAT_MISS", + "BriefDescription": "LSU marked reject due to ERAT (up to 2 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0x1d15a", + "EventName": "PM_MRK_SRC_PREF_TRACK_EFF", + "BriefDescription": "Marked src pref track was effective", + "PublicDescription": "" + }, + { + "EventCode": "0x3d15a", + "EventName": "PM_MRK_SRC_PREF_TRACK_INEFF", + "BriefDescription": "Prefetch tracked was ineffective for marked src", + "PublicDescription": "" + }, + { + "EventCode": "0x4d15c", + "EventName": "PM_MRK_SRC_PREF_TRACK_MOD", + "BriefDescription": "Prefetch tracked was 
moderate for marked src", + "PublicDescription": "" + }, + { + "EventCode": "0x1d15c", + "EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L2", + "BriefDescription": "Marked src Prefetch Tracked was moderate (source L2)", + "PublicDescription": "" + }, + { + "EventCode": "0x3d15c", + "EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L3", + "BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked src", + "PublicDescription": "" + }, + { + "EventCode": "0x1c15a", + "EventName": "PM_MRK_TGT_PREF_TRACK_EFF", + "BriefDescription": "Marked target pref track was effective", + "PublicDescription": "" + }, + { + "EventCode": "0x3c15a", + "EventName": "PM_MRK_TGT_PREF_TRACK_INEFF", + "BriefDescription": "Prefetch tracked was ineffective for marked target", + "PublicDescription": "" + }, + { + "EventCode": "0x4c15c", + "EventName": "PM_MRK_TGT_PREF_TRACK_MOD", + "BriefDescription": "Prefetch tracked was moderate for marked target", + "PublicDescription": "" + }, + { + "EventCode": "0x1c15c", + "EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L2", + "BriefDescription": "Marked target Prefetch Tracked was moderate (source L2)", + "PublicDescription": "" + }, + { + "EventCode": "0x3c15c", + "EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L3", + "BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked target", + "PublicDescription": "" + }, + { + "EventCode": "0x20b0", + "EventName": "PM_NESTED_TEND", + "BriefDescription": "Completion time nested tend", + "PublicDescription": "" + }, + { + "EventCode": "0x20b6", + "EventName": "PM_NON_FAV_TBEGIN", + "BriefDescription": "Dispatch time non favored tbegin", + "PublicDescription": "" + }, + { + "EventCode": "0x2001a", + "EventName": "PM_NTCG_ALL_FIN", + "BriefDescription": "Cycles after all instructions have finished to group completed", + "PublicDescription": "Ccycles after all instructions have finished to group completed" + }, + { + "EventCode": "0x20ac", + "EventName": "PM_OUTER_TBEGIN", + "BriefDescription": "Completion time outer tbegin", + "PublicDescription": "" + }, + { + "EventCode": "0x20ae", + "EventName": "PM_OUTER_TEND", + "BriefDescription": "Completion time outer tend", + "PublicDescription": "" + }, + { + "EventCode": "0x2005a", + "EventName": "PM_PREF_TRACKED", + "BriefDescription": "Total number of Prefetch Operations that were tracked", + "PublicDescription": "" + }, + { + "EventCode": "0x1005a", + "EventName": "PM_PREF_TRACK_EFF", + "BriefDescription": "Prefetch Tracked was effective", + "PublicDescription": "" + }, + { + "EventCode": "0x3005a", + "EventName": "PM_PREF_TRACK_INEFF", + "BriefDescription": "Prefetch tracked was ineffective", + "PublicDescription": "" + }, + { + "EventCode": "0x4005a", + "EventName": "PM_PREF_TRACK_MOD", + "BriefDescription": "Prefetch tracked was moderate", + "PublicDescription": "" + }, + { + "EventCode": "0x1005c", + "EventName": "PM_PREF_TRACK_MOD_L2", + "BriefDescription": "Prefetch Tracked was moderate (source L2)", + "PublicDescription": "" + }, + { + "EventCode": "0x3005c", + "EventName": "PM_PREF_TRACK_MOD_L3", + "BriefDescription": "Prefetch tracked was moderate (L3)", + "PublicDescription": "" + }, + { + "EventCode": "0xe084", + "EventName": "PM_PTE_PREFETCH", + "BriefDescription": "PTE prefetches", + "PublicDescription": "PTE prefetches42" + }, + { + "EventCode": "0x16081", + "EventName": "PM_RC0_ALLOC", + "BriefDescription": "RC mach 0 Busy. 
Used by PMU to sample ave RC livetime(mach0 used as sample point)", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x16080", + "EventName": "PM_RC0_BUSY", + "BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)", + "PublicDescription": "" + }, + { + "EventCode": "0x200301ea", + "EventName": "PM_RC_LIFETIME_EXC_1024", + "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 1024 cycles", + "PublicDescription": "Reload latency exceeded 1024 cyc" + }, + { + "EventCode": "0x200401ec", + "EventName": "PM_RC_LIFETIME_EXC_2048", + "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 2048 cycles", + "PublicDescription": "Threshold counter exceeded a value of 2048" + }, + { + "EventCode": "0x200101e8", + "EventName": "PM_RC_LIFETIME_EXC_256", + "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 256 cycles", + "PublicDescription": "Threshold counter exceed a count of 256" + }, + { + "EventCode": "0x200201e6", + "EventName": "PM_RC_LIFETIME_EXC_32", + "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 32 cycles", + "PublicDescription": "Reload latency exceeded 32 cyc" + }, + { + "EventCode": "0x36088", + "EventName": "PM_RC_USAGE", + "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running", + "PublicDescription": "" + }, + { + "EventCode": "0x20004", + "EventName": "PM_REAL_SRQ_FULL", + "BriefDescription": "Out of real srq entries", + "PublicDescription": "" + }, + { + "EventCode": "0x2006a", + "EventName": "PM_RUN_CYC_SMT2_SHRD_MODE", + "BriefDescription": "cycles this threads run latch is set and the core is in SMT2 shared mode", + "PublicDescription": "Cycles run latch is set and core is in SMT2-shared mode" + }, + { + "EventCode": "0x1006a", + "EventName": "PM_RUN_CYC_SMT2_SPLIT_MODE", + "BriefDescription": "Cycles run latch is set and core is in SMT2-split mode", + "PublicDescription": "" + }, + { + "EventCode": "0x4006c", + "EventName": "PM_RUN_CYC_SMT8_MODE", + "BriefDescription": "Cycles run latch is set and core is in SMT8 mode", + "PublicDescription": "" + }, + { + "EventCode": "0xf082", + "EventName": "PM_SEC_ERAT_HIT", + "BriefDescription": "secondary ERAT Hit", + "PublicDescription": "secondary ERAT Hit42" + }, + { + "EventCode": "0x508c", + "EventName": "PM_SHL_CREATED", + "BriefDescription": "Store-Hit-Load Table Entry Created", + "PublicDescription": "" + }, + { + "EventCode": "0x508e", + "EventName": "PM_SHL_ST_CONVERT", + "BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled", + "PublicDescription": "" + }, + { + "EventCode": "0x5090", + "EventName": "PM_SHL_ST_DISABLE", + "BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)", + "PublicDescription": "" + }, + { + "EventCode": "0x26085", + "EventName": "PM_SN0_ALLOC", + "BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)", + "PublicDescription": "0.0" + }, + { + "EventCode": "0x26084", + "EventName": "PM_SN0_BUSY", + "BriefDescription": "SN mach 0 Busy. 
Used by PMU to sample ave RC lifetime (mach0 used as sample point)", + "PublicDescription": "" + }, + { + "EventCode": "0xd0b2", + "EventName": "PM_SNOOP_TLBIE", + "BriefDescription": "TLBIE snoop", + "PublicDescription": "TLBIE snoop" + }, + { + "EventCode": "0x4608c", + "EventName": "PM_SN_USAGE", + "BriefDescription": "Continuous 16 cycle(2to1) window where this signal rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running", + "PublicDescription": "" + }, + { + "EventCode": "0x10028", + "EventName": "PM_STALL_END_GCT_EMPTY", + "BriefDescription": "Count ended because GCT went empty", + "PublicDescription": "" + }, + { + "EventCode": "0xc090", + "EventName": "PM_STCX_LSU", + "BriefDescription": "STCX executed reported at sent to nest", + "PublicDescription": "STCX executed reported at sent to nest" + }, + { + "EventCode": "0x3090", + "EventName": "PM_SWAP_CANCEL", + "BriefDescription": "SWAP cancel, rtag not available", + "PublicDescription": "" + }, + { + "EventCode": "0x3092", + "EventName": "PM_SWAP_CANCEL_GPR", + "BriefDescription": "SWAP cancel, rtag not available for gpr", + "PublicDescription": "" + }, + { + "EventCode": "0x308c", + "EventName": "PM_SWAP_COMPLETE", + "BriefDescription": "swap cast in completed", + "PublicDescription": "" + }, + { + "EventCode": "0x308e", + "EventName": "PM_SWAP_COMPLETE_GPR", + "BriefDescription": "swap cast in completed for gpr", + "PublicDescription": "" + }, + { + "EventCode": "0xe086", + "EventName": "PM_TABLEWALK_CYC_PREF", + "BriefDescription": "tablewalk qualified for pte prefetches", + "PublicDescription": "tablewalk qualified for pte prefetches" + }, + { + "EventCode": "0x20b2", + "EventName": "PM_TABORT_TRECLAIM", + "BriefDescription": "Completion time tabortnoncd, tabortcd, treclaim", + "PublicDescription": "" + }, + { + "EventCode": "0xe0ba", + "EventName": "PM_TEND_PEND_CYC", + "BriefDescription": "TEND latency per thread", + "PublicDescription": "TEND latency per thread" + }, + { + "EventCode": "0x10012", + "EventName": "PM_THRD_GRP_CMPL_BOTH_CYC", + "BriefDescription": "Cycles group completed on both completion slots by any thread", + "PublicDescription": "Two threads finished same cycle (gated by run latch)" + }, + { + "EventCode": "0x40bc", + "EventName": "PM_THRD_PRIO_0_1_CYC", + "BriefDescription": "Cycles thread running at priority level 0 or 1", + "PublicDescription": "" + }, + { + "EventCode": "0x40be", + "EventName": "PM_THRD_PRIO_2_3_CYC", + "BriefDescription": "Cycles thread running at priority level 2 or 3", + "PublicDescription": "" + }, + { + "EventCode": "0x5080", + "EventName": "PM_THRD_PRIO_4_5_CYC", + "BriefDescription": "Cycles thread running at priority level 4 or 5", + "PublicDescription": "" + }, + { + "EventCode": "0x5082", + "EventName": "PM_THRD_PRIO_6_7_CYC", + "BriefDescription": "Cycles thread running at priority level 6 or 7", + "PublicDescription": "" + }, + { + "EventCode": "0x3098", + "EventName": "PM_THRD_REBAL_CYC", + "BriefDescription": "cycles rebalance was active", + "PublicDescription": "" + }, + { + "EventCode": "0x20b8", + "EventName": "PM_TM_BEGIN_ALL", + "BriefDescription": "Tm any tbegin", + "PublicDescription": "" + }, + { + "EventCode": "0x20ba", + "EventName": "PM_TM_END_ALL", + "BriefDescription": "Tm any tend", + "PublicDescription": "" + }, + { + "EventCode": "0x3086", + "EventName": "PM_TM_FAIL_CONF_NON_TM", + "BriefDescription": "TEXAS fail reason @ completion", + "PublicDescription": "" + 
}, + { + "EventCode": "0x3088", + "EventName": "PM_TM_FAIL_CON_TM", + "BriefDescription": "TEXAS fail reason @ completion", + "PublicDescription": "" + }, + { + "EventCode": "0xe0b2", + "EventName": "PM_TM_FAIL_DISALLOW", + "BriefDescription": "TM fail disallow", + "PublicDescription": "TM fail disallow" + }, + { + "EventCode": "0x3084", + "EventName": "PM_TM_FAIL_FOOTPRINT_OVERFLOW", + "BriefDescription": "TEXAS fail reason @ completion", + "PublicDescription": "" + }, + { + "EventCode": "0xe0b8", + "EventName": "PM_TM_FAIL_NON_TX_CONFLICT", + "BriefDescription": "Non transactional conflict from LSU, whatever gets reported to texas", + "PublicDescription": "Non transactional conflict from LSU, whatever gets reported to texas" + }, + { + "EventCode": "0x308a", + "EventName": "PM_TM_FAIL_SELF", + "BriefDescription": "TEXAS fail reason @ completion", + "PublicDescription": "" + }, + { + "EventCode": "0xe0b4", + "EventName": "PM_TM_FAIL_TLBIE", + "BriefDescription": "TLBIE hit bloom filter", + "PublicDescription": "TLBIE hit bloom filter" + }, + { + "EventCode": "0xe0b6", + "EventName": "PM_TM_FAIL_TX_CONFLICT", + "BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas", + "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas" + }, + { + "EventCode": "0x20bc", + "EventName": "PM_TM_TBEGIN", + "BriefDescription": "Tm nested tbegin", + "PublicDescription": "" + }, + { + "EventCode": "0x3080", + "EventName": "PM_TM_TRESUME", + "BriefDescription": "Tm resume", + "PublicDescription": "" + }, + { + "EventCode": "0x20be", + "EventName": "PM_TM_TSUSPEND", + "BriefDescription": "Tm suspend", + "PublicDescription": "" + }, + { + "EventCode": "0xe08c", + "EventName": "PM_UP_PREF_L3", + "BriefDescription": "Micropartition prefetch", + "PublicDescription": "Micropartition prefetch" + }, + { + "EventCode": "0xe08e", + "EventName": "PM_UP_PREF_POINTER", + "BriefDescription": "Micropartition pointer prefetches", + "PublicDescription": "Micropartition pointer prefetches" + }, + { + "EventCode": "0xa0a4", + "EventName": "PM_VSU0_16FLOP", + "BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)", + "PublicDescription": "" + }, + { + "EventCode": "0xa080", + "EventName": "PM_VSU0_1FLOP", + "BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished", + "PublicDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished. Decode into 1,2,4 FLOP according to instr IOP, multiplied by #vector elements according to route (eg x1, x2, x4). Only if instr sends finish to ISU" + }, + { + "EventCode": "0xa098", + "EventName": "PM_VSU0_2FLOP", + "BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)", + "PublicDescription": "" + }, + { + "EventCode": "0xa09c", + "EventName": "PM_VSU0_4FLOP", + "BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)", + "PublicDescription": "" + }, + { + "EventCode": "0xa0a0", + "EventName": "PM_VSU0_8FLOP", + "BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)", + "PublicDescription": "" + }, + { + "EventCode": "0xb0a4", + "EventName": "PM_VSU0_COMPLEX_ISSUED", + "BriefDescription": "Complex VMX instruction issued", + "PublicDescription": "" + }, + { + 
"EventCode": "0xb0b4", + "EventName": "PM_VSU0_CY_ISSUED", + "BriefDescription": "Cryptographic instruction RFC02196 Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb0a8", + "EventName": "PM_VSU0_DD_ISSUED", + "BriefDescription": "64BIT Decimal Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa08c", + "EventName": "PM_VSU0_DP_2FLOP", + "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg", + "PublicDescription": "" + }, + { + "EventCode": "0xa090", + "EventName": "PM_VSU0_DP_FMA", + "BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub", + "PublicDescription": "" + }, + { + "EventCode": "0xa094", + "EventName": "PM_VSU0_DP_FSQRT_FDIV", + "BriefDescription": "DP vector versions of fdiv,fsqrt", + "PublicDescription": "" + }, + { + "EventCode": "0xb0ac", + "EventName": "PM_VSU0_DQ_ISSUED", + "BriefDescription": "128BIT Decimal Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb0b0", + "EventName": "PM_VSU0_EX_ISSUED", + "BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa0bc", + "EventName": "PM_VSU0_FIN", + "BriefDescription": "VSU0 Finished an instruction", + "PublicDescription": "" + }, + { + "EventCode": "0xa084", + "EventName": "PM_VSU0_FMA", + "BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!", + "PublicDescription": "" + }, + { + "EventCode": "0xb098", + "EventName": "PM_VSU0_FPSCR", + "BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0", + "PublicDescription": "" + }, + { + "EventCode": "0xa088", + "EventName": "PM_VSU0_FSQRT_FDIV", + "BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!", + "PublicDescription": "" + }, + { + "EventCode": "0xb090", + "EventName": "PM_VSU0_PERMUTE_ISSUED", + "BriefDescription": "Permute VMX Instruction Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb088", + "EventName": "PM_VSU0_SCALAR_DP_ISSUED", + "BriefDescription": "Double Precision scalar instruction issued on Pipe0", + "PublicDescription": "" + }, + { + "EventCode": "0xb094", + "EventName": "PM_VSU0_SIMPLE_ISSUED", + "BriefDescription": "Simple VMX instruction issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa0a8", + "EventName": "PM_VSU0_SINGLE", + "BriefDescription": "FPU single precision", + "PublicDescription": "" + }, + { + "EventCode": "0xb09c", + "EventName": "PM_VSU0_SQ", + "BriefDescription": "Store Vector Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb08c", + "EventName": "PM_VSU0_STF", + "BriefDescription": "FPU store (SP or DP) issued on Pipe0", + "PublicDescription": "" + }, + { + "EventCode": "0xb080", + "EventName": "PM_VSU0_VECTOR_DP_ISSUED", + "BriefDescription": "Double Precision vector instruction issued on Pipe0", + "PublicDescription": "" + }, + { + "EventCode": "0xb084", + "EventName": "PM_VSU0_VECTOR_SP_ISSUED", + "BriefDescription": "Single Precision vector instruction issued (executed)", + "PublicDescription": "" + }, + { + "EventCode": "0xa0a6", + "EventName": "PM_VSU1_16FLOP", + "BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)", + "PublicDescription": "" + }, + { + "EventCode": "0xa082", + "EventName": "PM_VSU1_1FLOP", + "BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished", + "PublicDescription": "" + }, + { + "EventCode": "0xa09a", + "EventName": "PM_VSU1_2FLOP", + 
"BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)", + "PublicDescription": "" + }, + { + "EventCode": "0xa09e", + "EventName": "PM_VSU1_4FLOP", + "BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)", + "PublicDescription": "" + }, + { + "EventCode": "0xa0a2", + "EventName": "PM_VSU1_8FLOP", + "BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)", + "PublicDescription": "" + }, + { + "EventCode": "0xb0a6", + "EventName": "PM_VSU1_COMPLEX_ISSUED", + "BriefDescription": "Complex VMX instruction issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb0b6", + "EventName": "PM_VSU1_CY_ISSUED", + "BriefDescription": "Cryptographic instruction RFC02196 Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb0aa", + "EventName": "PM_VSU1_DD_ISSUED", + "BriefDescription": "64BIT Decimal Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa08e", + "EventName": "PM_VSU1_DP_2FLOP", + "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg", + "PublicDescription": "" + }, + { + "EventCode": "0xa092", + "EventName": "PM_VSU1_DP_FMA", + "BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub", + "PublicDescription": "" + }, + { + "EventCode": "0xa096", + "EventName": "PM_VSU1_DP_FSQRT_FDIV", + "BriefDescription": "DP vector versions of fdiv,fsqrt", + "PublicDescription": "" + }, + { + "EventCode": "0xb0ae", + "EventName": "PM_VSU1_DQ_ISSUED", + "BriefDescription": "128BIT Decimal Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb0b2", + "EventName": "PM_VSU1_EX_ISSUED", + "BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa0be", + "EventName": "PM_VSU1_FIN", + "BriefDescription": "VSU1 Finished an instruction", + "PublicDescription": "" + }, + { + "EventCode": "0xa086", + "EventName": "PM_VSU1_FMA", + "BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!", + "PublicDescription": "" + }, + { + "EventCode": "0xb09a", + "EventName": "PM_VSU1_FPSCR", + "BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0", + "PublicDescription": "" + }, + { + "EventCode": "0xa08a", + "EventName": "PM_VSU1_FSQRT_FDIV", + "BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!", + "PublicDescription": "" + }, + { + "EventCode": "0xb092", + "EventName": "PM_VSU1_PERMUTE_ISSUED", + "BriefDescription": "Permute VMX Instruction Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb08a", + "EventName": "PM_VSU1_SCALAR_DP_ISSUED", + "BriefDescription": "Double Precision scalar instruction issued on Pipe1", + "PublicDescription": "" + }, + { + "EventCode": "0xb096", + "EventName": "PM_VSU1_SIMPLE_ISSUED", + "BriefDescription": "Simple VMX instruction issued", + "PublicDescription": "" + }, + { + "EventCode": "0xa0aa", + "EventName": "PM_VSU1_SINGLE", + "BriefDescription": "FPU single precision", + "PublicDescription": "" + }, + { + "EventCode": "0xb09e", + "EventName": "PM_VSU1_SQ", + "BriefDescription": "Store Vector Issued", + "PublicDescription": "" + }, + { + "EventCode": "0xb08e", + "EventName": "PM_VSU1_STF", + "BriefDescription": "FPU store (SP or DP) issued on Pipe1", + "PublicDescription": "" + }, + { + 
"EventCode": "0xb082", + "EventName": "PM_VSU1_VECTOR_DP_ISSUED", + "BriefDescription": "Double Precision vector instruction issued on Pipe1", + "PublicDescription": "" + }, + { + "EventCode": "0xb086", + "EventName": "PM_VSU1_VECTOR_SP_ISSUED", + "BriefDescription": "Single Precision vector instruction issued (executed)", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json new file mode 100644 index 0000000000..0acfaaef47 --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json @@ -0,0 +1,350 @@ +[ + { + "EventCode": "0x100f2", + "EventName": "PM_1PLUS_PPC_CMPL", + "BriefDescription": "1 or more ppc insts finished", + "PublicDescription": "1 or more ppc insts finished (completed)" + }, + { + "EventCode": "0x400f2", + "EventName": "PM_1PLUS_PPC_DISP", + "BriefDescription": "Cycles at least one Instr Dispatched", + "PublicDescription": "Cycles at least one Instr Dispatched. Could be a group with only microcode. Issue HW016521" + }, + { + "EventCode": "0x100fa", + "EventName": "PM_ANY_THRD_RUN_CYC", + "BriefDescription": "One of threads in run_cycles", + "PublicDescription": "Any thread in run_cycles (was one thread in run_cycles)" + }, + { + "EventCode": "0x4000a", + "EventName": "PM_CMPLU_STALL", + "BriefDescription": "Completion stall", + "PublicDescription": "" + }, + { + "EventCode": "0x4d018", + "EventName": "PM_CMPLU_STALL_BRU", + "BriefDescription": "Completion stall due to a Branch Unit", + "PublicDescription": "" + }, + { + "EventCode": "0x2c012", + "EventName": "PM_CMPLU_STALL_DCACHE_MISS", + "BriefDescription": "Completion stall by Dcache miss", + "PublicDescription": "" + }, + { + "EventCode": "0x2c018", + "EventName": "PM_CMPLU_STALL_DMISS_L21_L31", + "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)", + "PublicDescription": "" + }, + { + "EventCode": "0x2c016", + "EventName": "PM_CMPLU_STALL_DMISS_L2L3", + "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3", + "PublicDescription": "" + }, + { + "EventCode": "0x4c016", + "EventName": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT", + "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict", + "PublicDescription": "Completion stall due to cache miss resolving in core's L2/L3 with a conflict" + }, + { + "EventCode": "0x4c01a", + "EventName": "PM_CMPLU_STALL_DMISS_L3MISS", + "BriefDescription": "Completion stall due to cache miss resolving missed the L3", + "PublicDescription": "" + }, + { + "EventCode": "0x4c018", + "EventName": "PM_CMPLU_STALL_DMISS_LMEM", + "BriefDescription": "Completion stall due to cache miss that resolves in local memory", + "PublicDescription": "Completion stall due to cache miss resolving in core's Local Memory" + }, + { + "EventCode": "0x2c01c", + "EventName": "PM_CMPLU_STALL_DMISS_REMOTE", + "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)", + "PublicDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)" + }, + { + "EventCode": "0x4c012", + "EventName": "PM_CMPLU_STALL_ERAT_MISS", + "BriefDescription": "Completion stall due to LSU reject ERAT miss", + "PublicDescription": "" + }, + { + "EventCode": "0x4d016", + "EventName": "PM_CMPLU_STALL_FXLONG", + "BriefDescription": "Completion stall due to a long latency fixed point instruction", + "PublicDescription": "" + }, + { + 
"EventCode": "0x2d016", + "EventName": "PM_CMPLU_STALL_FXU", + "BriefDescription": "Completion stall due to FXU", + "PublicDescription": "" + }, + { + "EventCode": "0x30036", + "EventName": "PM_CMPLU_STALL_HWSYNC", + "BriefDescription": "completion stall due to hwsync", + "PublicDescription": "" + }, + { + "EventCode": "0x4d014", + "EventName": "PM_CMPLU_STALL_LOAD_FINISH", + "BriefDescription": "Completion stall due to a Load finish", + "PublicDescription": "" + }, + { + "EventCode": "0x2c010", + "EventName": "PM_CMPLU_STALL_LSU", + "BriefDescription": "Completion stall by LSU instruction", + "PublicDescription": "" + }, + { + "EventCode": "0x10036", + "EventName": "PM_CMPLU_STALL_LWSYNC", + "BriefDescription": "completion stall due to isync/lwsync", + "PublicDescription": "" + }, + { + "EventCode": "0x30006", + "EventName": "PM_CMPLU_STALL_OTHER_CMPL", + "BriefDescription": "Instructions core completed while this tread was stalled", + "PublicDescription": "Instructions core completed while this thread was stalled" + }, + { + "EventCode": "0x4c01c", + "EventName": "PM_CMPLU_STALL_ST_FWD", + "BriefDescription": "Completion stall due to store forward", + "PublicDescription": "" + }, + { + "EventCode": "0x1001c", + "EventName": "PM_CMPLU_STALL_THRD", + "BriefDescription": "Completion Stalled due to thread conflict. Group ready to complete but it was another thread's turn", + "PublicDescription": "Completion stall due to thread conflict" + }, + { + "EventCode": "0x1e", + "EventName": "PM_CYC", + "BriefDescription": "Cycles", + "PublicDescription": "" + }, + { + "EventCode": "0x10006", + "EventName": "PM_DISP_HELD", + "BriefDescription": "Dispatch Held", + "PublicDescription": "" + }, + { + "EventCode": "0x4003c", + "EventName": "PM_DISP_HELD_SYNC_HOLD", + "BriefDescription": "Dispatch held due to SYNC hold", + "PublicDescription": "" + }, + { + "EventCode": "0x200f8", + "EventName": "PM_EXT_INT", + "BriefDescription": "external interrupt", + "PublicDescription": "" + }, + { + "EventCode": "0x400f8", + "EventName": "PM_FLUSH", + "BriefDescription": "Flush (any type)", + "PublicDescription": "" + }, + { + "EventCode": "0x30012", + "EventName": "PM_FLUSH_COMPLETION", + "BriefDescription": "Completion Flush", + "PublicDescription": "" + }, + { + "EventCode": "0x3000c", + "EventName": "PM_FREQ_DOWN", + "BriefDescription": "Power Management: Below Threshold B", + "PublicDescription": "Frequency is being slewed down due to Power Management" + }, + { + "EventCode": "0x4000c", + "EventName": "PM_FREQ_UP", + "BriefDescription": "Power Management: Above Threshold A", + "PublicDescription": "Frequency is being slewed up due to Power Management" + }, + { + "EventCode": "0x2000a", + "EventName": "PM_HV_CYC", + "BriefDescription": "Cycles in which msr_hv is high. 
Note that this event does not take msr_pr into consideration", + "PublicDescription": "cycles in hypervisor mode" + }, + { + "EventCode": "0x3405e", + "EventName": "PM_IFETCH_THROTTLE", + "BriefDescription": "Cycles in which Instruction fetch throttle was active", + "PublicDescription": "Cycles instruction fecth was throttled in IFU" + }, + { + "EventCode": "0x10014", + "EventName": "PM_IOPS_CMPL", + "BriefDescription": "Internal Operations completed", + "PublicDescription": "IOPS Completed" + }, + { + "EventCode": "0x3c058", + "EventName": "PM_LARX_FIN", + "BriefDescription": "Larx finished", + "PublicDescription": "" + }, + { + "EventCode": "0x1002e", + "EventName": "PM_LD_CMPL", + "BriefDescription": "count of Loads completed", + "PublicDescription": "" + }, + { + "EventCode": "0x10062", + "EventName": "PM_LD_L3MISS_PEND_CYC", + "BriefDescription": "Cycles L3 miss was pending for this thread", + "PublicDescription": "" + }, + { + "EventCode": "0x30066", + "EventName": "PM_LSU_FIN", + "BriefDescription": "LSU Finished an instruction (up to 2 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0x2003e", + "EventName": "PM_LSU_LMQ_SRQ_EMPTY_CYC", + "BriefDescription": "LSU empty (lmq and srq empty)", + "PublicDescription": "" + }, + { + "EventCode": "0x2e05c", + "EventName": "PM_LSU_REJECT_ERAT_MISS", + "BriefDescription": "LSU Reject due to ERAT (up to 4 per cycles)", + "PublicDescription": "" + }, + { + "EventCode": "0x4e05c", + "EventName": "PM_LSU_REJECT_LHS", + "BriefDescription": "LSU Reject due to LHS (up to 4 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0x1e05c", + "EventName": "PM_LSU_REJECT_LMQ_FULL", + "BriefDescription": "LSU reject due to LMQ full ( 4 per cycle)", + "PublicDescription": "" + }, + { + "EventCode": "0x1001a", + "EventName": "PM_LSU_SRQ_FULL_CYC", + "BriefDescription": "Storage Queue is full and is blocking dispatch", + "PublicDescription": "SRQ is Full" + }, + { + "EventCode": "0x40014", + "EventName": "PM_PROBE_NOP_DISP", + "BriefDescription": "ProbeNops dispatched", + "PublicDescription": "" + }, + { + "EventCode": "0x600f4", + "EventName": "PM_RUN_CYC", + "BriefDescription": "Run_cycles", + "PublicDescription": "" + }, + { + "EventCode": "0x3006c", + "EventName": "PM_RUN_CYC_SMT2_MODE", + "BriefDescription": "Cycles run latch is set and core is in SMT2 mode", + "PublicDescription": "" + }, + { + "EventCode": "0x2006c", + "EventName": "PM_RUN_CYC_SMT4_MODE", + "BriefDescription": "cycles this threads run latch is set and the core is in SMT4 mode", + "PublicDescription": "Cycles run latch is set and core is in SMT4 mode" + }, + { + "EventCode": "0x1006c", + "EventName": "PM_RUN_CYC_ST_MODE", + "BriefDescription": "Cycles run latch is set and core is in ST mode", + "PublicDescription": "" + }, + { + "EventCode": "0x500fa", + "EventName": "PM_RUN_INST_CMPL", + "BriefDescription": "Run_Instructions", + "PublicDescription": "" + }, + { + "EventCode": "0x1e058", + "EventName": "PM_STCX_FAIL", + "BriefDescription": "stcx failed", + "PublicDescription": "" + }, + { + "EventCode": "0x20016", + "EventName": "PM_ST_CMPL", + "BriefDescription": "Store completion count", + "PublicDescription": "" + }, + { + "EventCode": "0x200f0", + "EventName": "PM_ST_FIN", + "BriefDescription": "Store Instructions Finished", + "PublicDescription": "Store Instructions Finished (store sent to nest)" + }, + { + "EventCode": "0x20018", + "EventName": "PM_ST_FWD", + "BriefDescription": "Store forwards that finished", + "PublicDescription": "" + }, + { + 
"EventCode": "0x10026", + "EventName": "PM_TABLEWALK_CYC", + "BriefDescription": "Cycles when a tablewalk (I or D) is active", + "PublicDescription": "Tablewalk Active" + }, + { + "EventCode": "0x300f8", + "EventName": "PM_TB_BIT_TRANS", + "BriefDescription": "timebase event", + "PublicDescription": "" + }, + { + "EventCode": "0x2000c", + "EventName": "PM_THRD_ALL_RUN_CYC", + "BriefDescription": "All Threads in Run_cycles (was both threads in run_cycles)", + "PublicDescription": "" + }, + { + "EventCode": "0x30058", + "EventName": "PM_TLBIE_FIN", + "BriefDescription": "tlbie finished", + "PublicDescription": "" + }, + { + "EventCode": "0x10060", + "EventName": "PM_TM_TRANS_RUN_CYC", + "BriefDescription": "run cycles in transactional state", + "PublicDescription": "" + }, + { + "EventCode": "0x2e012", + "EventName": "PM_TM_TX_PASS_RUN_CYC", + "BriefDescription": "cycles spent in successful transactions", + "PublicDescription": "run cycles spent in successful transactions" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pmc.json b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json new file mode 100644 index 0000000000..5e0469f68b --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json @@ -0,0 +1,140 @@ +[ + { + "EventCode": "0x20010", + "EventName": "PM_PMC1_OVERFLOW", + "BriefDescription": "Overflow from counter 1", + "PublicDescription": "" + }, + { + "EventCode": "0x30010", + "EventName": "PM_PMC2_OVERFLOW", + "BriefDescription": "Overflow from counter 2", + "PublicDescription": "" + }, + { + "EventCode": "0x30020", + "EventName": "PM_PMC2_REWIND", + "BriefDescription": "PMC2 Rewind Event (did not match condition)", + "PublicDescription": "" + }, + { + "EventCode": "0x10022", + "EventName": "PM_PMC2_SAVED", + "BriefDescription": "PMC2 Rewind Value saved", + "PublicDescription": "PMC2 Rewind Value saved (matched condition)" + }, + { + "EventCode": "0x40010", + "EventName": "PM_PMC3_OVERFLOW", + "BriefDescription": "Overflow from counter 3", + "PublicDescription": "" + }, + { + "EventCode": "0x10010", + "EventName": "PM_PMC4_OVERFLOW", + "BriefDescription": "Overflow from counter 4", + "PublicDescription": "" + }, + { + "EventCode": "0x10020", + "EventName": "PM_PMC4_REWIND", + "BriefDescription": "PMC4 Rewind Event", + "PublicDescription": "PMC4 Rewind Event (did not match condition)" + }, + { + "EventCode": "0x30022", + "EventName": "PM_PMC4_SAVED", + "BriefDescription": "PMC4 Rewind Value saved (matched condition)", + "PublicDescription": "" + }, + { + "EventCode": "0x10024", + "EventName": "PM_PMC5_OVERFLOW", + "BriefDescription": "Overflow from counter 5", + "PublicDescription": "" + }, + { + "EventCode": "0x30024", + "EventName": "PM_PMC6_OVERFLOW", + "BriefDescription": "Overflow from counter 6", + "PublicDescription": "" + }, + { + "EventCode": "0x400f4", + "EventName": "PM_RUN_PURR", + "BriefDescription": "Run_PURR", + "PublicDescription": "" + }, + { + "EventCode": "0x10008", + "EventName": "PM_RUN_SPURR", + "BriefDescription": "Run SPURR", + "PublicDescription": "" + }, + { + "EventCode": "0x0", + "EventName": "PM_SUSPENDED", + "BriefDescription": "Counter OFF", + "PublicDescription": "" + }, + { + "EventCode": "0x301ea", + "EventName": "PM_THRESH_EXC_1024", + "BriefDescription": "Threshold counter exceeded a value of 1024", + "PublicDescription": "" + }, + { + "EventCode": "0x401ea", + "EventName": "PM_THRESH_EXC_128", + "BriefDescription": "Threshold counter exceeded a value of 128", + "PublicDescription": "" + }, + { + "EventCode": "0x401ec", + 
"EventName": "PM_THRESH_EXC_2048", + "BriefDescription": "Threshold counter exceeded a value of 2048", + "PublicDescription": "" + }, + { + "EventCode": "0x101e8", + "EventName": "PM_THRESH_EXC_256", + "BriefDescription": "Threshold counter exceed a count of 256", + "PublicDescription": "" + }, + { + "EventCode": "0x201e6", + "EventName": "PM_THRESH_EXC_32", + "BriefDescription": "Threshold counter exceeded a value of 32", + "PublicDescription": "" + }, + { + "EventCode": "0x101e6", + "EventName": "PM_THRESH_EXC_4096", + "BriefDescription": "Threshold counter exceed a count of 4096", + "PublicDescription": "" + }, + { + "EventCode": "0x201e8", + "EventName": "PM_THRESH_EXC_512", + "BriefDescription": "Threshold counter exceeded a value of 512", + "PublicDescription": "" + }, + { + "EventCode": "0x301e8", + "EventName": "PM_THRESH_EXC_64", + "BriefDescription": "IFU non-branch finished", + "PublicDescription": "Threshold counter exceeded a value of 64" + }, + { + "EventCode": "0x101ec", + "EventName": "PM_THRESH_MET", + "BriefDescription": "threshold exceeded", + "PublicDescription": "" + }, + { + "EventCode": "0x4016e", + "EventName": "PM_THRESH_NOT_MET", + "BriefDescription": "Threshold counter did not meet threshold", + "PublicDescription": "" + } +] diff --git a/tools/perf/pmu-events/arch/powerpc/power8/translation.json b/tools/perf/pmu-events/arch/powerpc/power8/translation.json new file mode 100644 index 0000000000..a1657f5fdc --- /dev/null +++ b/tools/perf/pmu-events/arch/powerpc/power8/translation.json @@ -0,0 +1,176 @@ +[ + { + "EventCode": "0x4c054", + "EventName": "PM_DERAT_MISS_16G", + "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G", + "PublicDescription": "" + }, + { + "EventCode": "0x3c054", + "EventName": "PM_DERAT_MISS_16M", + "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M", + "PublicDescription": "" + }, + { + "EventCode": "0x1c056", + "EventName": "PM_DERAT_MISS_4K", + "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K", + "PublicDescription": "" + }, + { + "EventCode": "0x2c054", + "EventName": "PM_DERAT_MISS_64K", + "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K", + "PublicDescription": "" + }, + { + "EventCode": "0x4e048", + "EventName": "PM_DPTEG_FROM_DL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e048", + "EventName": "PM_DPTEG_FROM_DL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e042", + "EventName": "PM_DPTEG_FROM_L2", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e04e", + "EventName": "PM_DPTEG_FROM_L2MISS", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e040", + "EventName": "PM_DPTEG_FROM_L2_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. 
due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e040", + "EventName": "PM_DPTEG_FROM_L2_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4e042", + "EventName": "PM_DPTEG_FROM_L3", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x3e042", + "EventName": "PM_DPTEG_FROM_L3_DISP_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e042", + "EventName": "PM_DPTEG_FROM_L3_MEPF", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e044", + "EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT", + "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e04c", + "EventName": "PM_DPTEG_FROM_LL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e048", + "EventName": "PM_DPTEG_FROM_LMEM", + "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e04c", + "EventName": "PM_DPTEG_FROM_MEMORY", + "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x4e04a", + "EventName": "PM_DPTEG_FROM_OFF_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e048", + "EventName": "PM_DPTEG_FROM_ON_CHIP_CACHE", + "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e046", + "EventName": "PM_DPTEG_FROM_RL2L3_MOD", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x1e04a", + "EventName": "PM_DPTEG_FROM_RL2L3_SHR", + "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x2e04a", + "EventName": "PM_DPTEG_FROM_RL4", + "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a data side request", + "PublicDescription": "" + }, + { + "EventCode": "0x300fc", + "EventName": "PM_DTLB_MISS", + "BriefDescription": "Data PTEG reload", + "PublicDescription": "Data PTEG Reloaded (DTLB Miss)" + 
}, + { + "EventCode": "0x1c058", + "EventName": "PM_DTLB_MISS_16G", + "BriefDescription": "Data TLB Miss page size 16G", + "PublicDescription": "" + }, + { + "EventCode": "0x4c056", + "EventName": "PM_DTLB_MISS_16M", + "BriefDescription": "Data TLB Miss page size 16M", + "PublicDescription": "" + }, + { + "EventCode": "0x2c056", + "EventName": "PM_DTLB_MISS_4K", + "BriefDescription": "Data TLB Miss page size 4k", + "PublicDescription": "" + }, + { + "EventCode": "0x3c056", + "EventName": "PM_DTLB_MISS_64K", + "BriefDescription": "Data TLB Miss page size 64K", + "PublicDescription": "" + }, + { + "EventCode": "0x200f6", + "EventName": "PM_LSU_DERAT_MISS", + "BriefDescription": "DERAT Reloaded due to a DERAT miss", + "PublicDescription": "DERAT Reloaded (Miss)" + }, + { + "EventCode": "0x20066", + "EventName": "PM_TLB_MISS", + "BriefDescription": "TLB Miss (I + D)", + "PublicDescription": "" + } +] |
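Note: the files added by this commit are plain JSON event tables; at perf build time they are turned into C lookup tables by the jevents tooling, and the fields shown above (EventCode, EventName, BriefDescription, PublicDescription) can also be read directly from the source tree. The short Python sketch below is illustrative only, not the in-tree generator, and the path it opens is an assumption about where the kernel tree is checked out.

#!/usr/bin/env python3
# Illustrative sketch: list the events defined in one of the power8 JSON
# files added above. Not the in-tree jevents tool; EVENT_FILE is an assumed
# path relative to a kernel source checkout.
import json

EVENT_FILE = "tools/perf/pmu-events/arch/powerpc/power8/translation.json"

with open(EVENT_FILE, encoding="utf-8") as f:
    events = json.load(f)  # each file is a flat JSON array of event objects

for ev in events:
    # Every entry carries EventCode, EventName and BriefDescription;
    # PublicDescription may be an empty string.
    print(f'{ev["EventCode"]:>10}  {ev["EventName"]:<28}  {ev["BriefDescription"]}')

Once perf is built with these tables, events such as PM_DTLB_MISS can typically be requested by name (for example, perf stat -e pm_dtlb_miss) on POWER8 hardware, though the exact alias naming depends on the perf build.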