/****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/

/* 
* File:    map-i7.c
* Author:  George Neville-Neil
*          gnn@freebsd.org
*          Harald Servat
*          redcrash@gmail.com
*/

#include "freebsd.h"
#include "papiStdEventDefs.h"
#include "map.h"


 /****************************************************************************
 i7 SUBSTRATE 
 i7 SUBSTRATE 
 i7 SUBSTRATE
 i7 SUBSTRATE
 i7 SUBSTRATE
****************************************************************************/

/*
	NativeEvent_Value_i7 must match i7_info 
*/

Native_Event_LabelDescription_t i7Processor_info[] =
{
	{"SB_FORWARD.ANY", "Counts the number of store forwards. "},
	{"LOAD_BLOCK.STD", "Counts the number of loads blocked by a preceding store with unknown data."},
	{"LOAD_BLOCK.ADDRESS_OFFSET", "Counts the number of loads blocked by a preceding store address."},
	{"SB_DRAIN.CYCLES", "Counts the cycles of store buffer drains."},
	{"MISALIGN_MEM_REF.LOAD", "Counts the number of misaligned load references."},
	{"MISALIGN_MEM_REF.STORE", "Counts the number of misaligned store references."},
	{"MISALIGN_MEM_REF.ANY", "Counts the number of misaligned memory references."},
	{"STORE_BLOCKS.NOT_STA", "This event counts the number of load operations delayed caused by preceding stores whose addresses are known but whose data is unknown, and preceding stores that conflict with the load but which incompletely overlap the load."},
	{"STORE_BLOCKS.STA", "This event counts load operations delayed caused by preceding stores whose addresses are unknown (STA block)."},
	{"STORE_BLOCKS.AT_RET", "Counts number of loads delayed with at-Retirement block code. The following loads need to be executed at retirement and wait for all senior stores on the same thread to be drained: load splitting across 4K boundary (page split), load accessing uncacheable (UC or USWC) memory, load lock, and load with page table in UC or USWC memory region."},
	{"STORE_BLOCKS.L1D_BLOCK", "Cacheable  loads delayed with L1D block code."},
	{"STORE_BLOCKS.ANY", "All loads delayed due to store blocks."},
	{"PARTIAL_ADDRESS_ALIAS", "Counts false dependency due to partial address aliasing."},
	{"DTLB_LOAD_MISSES.ANY", "Counts all load misses that cause a page walk."},
	{"DTLB_LOAD_MISSES.WALK_COMPLETED", "Counts number of completed page walks due to load miss in the STLB."},
	{"DTLB_LOAD_MISSES.STLB_HIT", "Number of cache load STLB hits."},
	{"DTLB_LOAD_MISSES.PDE_MISS", "Number of DTLB cache load misses where the low part of the linear to physical address translation was missed."},
	{"DTLB_LOAD_MISSES.PDP_MISS", "Number of DTLB cache load misses where the high part of the linear to physical address translation was missed."},
	{"DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED", "Counts number of completed large page walks due to load miss in the STLB."},
	{"MEMORY_DISAMBIGURATION.RESET", "Counts memory disambiguration reset cycles."},
	{"MEMORY_DISAMBIGURATION.SUCCESS", "Counts the number of loads that memory disambiguration succeeded."},
	{"MEMORY_DISAMBIGURATION.WATCHDOG", "Counts the number of times the memory disambiguration watchdog kicked in."},
	{"MEMORY_DISAMBIGURATION.WATCH_CYCLES", "Counts the cycles that the memory disambiguration watchdog is active."},
	{"MEM_INST_RETIRED.LOADS", "Counts the number of instructions with an architecturally-visible store retired on the architected path."},
	{"MEM_INST_RETIRED.STORES", "Counts the number of instructions with an architecturally-visible store retired on the architected path."},
	{"MEM_STORE_RETIRED.DTLB_MISS", "The event counts the number of retired stores that missed the DTLB. The DTLB miss is not counted if the store operation causes a fault. Does not counter prefetches."},
	{"UOPS_ISSUED.ANY", "Counts the number of Uops issued by the Register Allocation Table to the Reservation Station, i.e. the UOPs issued from the front end to the back end."},
	{"UOPS_ISSUED.FUSED", "Counts the number of fused Uops that were issued from the Register Allocation Table to the Reservation Station."},
	{"MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM", "Counts number of memory load instructions retired where the memory reference hit modified data in a sibling core residing on the same socket."},
	{"MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT", "Counts number of memory load instructions retired where the memory reference missed the L1, L2 and L3 caches and HIT in a remote socket's cache. Only counts locally homed lines."},
	{"MEM_UNCORE_RETIRED.REMOTE_DRAM", "Counts number of memory load instructions retired where the memory reference missed the L1, L2 and L3 caches and was remotely homed. This includes both DRAM access and HITM in a remote socket's cache for remotely homed lines."},
	{"MEM_UNCORE_RETIRED.LOCAL_DRAM", "Counts number of memory load instructions retired where the memory reference missed the L1, L2 and L3 caches and required a local socket memory reference. This includes locally homed cachelines that were in a modified state in another socket."},
	{"FP_COMP_OPS_EXE.X87", "Counts the number of FP Computational Uops Executed. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a separate FADD instruction."},
	{"FP_COMP_OPS_EXE.MMX", "Counts number of MMX Uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP", "Counts number of SSE and SSE2 FP uops executed."},
	{"FP_COMP_OPS_EXE.SSE2_INTEGER", "Counts number of SSE2 integer uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP_PACKED", "Counts number of SSE FP packed uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP_SCALAR", "Counts number of SSE FP scalar uops executed."},
	{"FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION", "Counts number of SSE* FP single precision uops executed."},
	{"FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION", "Counts number of SSE* FP double precision uops executed."},
	{"SIMD_INT_128.PACKED_MPY", "Counts number of 128 bit ED_MPY integer multiply operations."},
	{"SIMD_INT_128.PACKED_SHIFT", "Counts number of 128 bit SIMD integer shift operations."},
	{"SIMD_INT_128.PACK", " Counts number of 128 bit SIMD integer pack operations."},
	{"SIMD_INT_128.UNPACK", "Counts number of 128 bit SIMD integer unpack operations."},
	{"SIMD_INT_128.PACKED_LOGICAL", "Counts number of 128 bit SIMD integer logical  operations."},
	{"SIMD_INT_128.PACKED_ARITH", "Counts number of 128 bit SIMD integer arithmetic operations."},
	{"SIMD_INT_128.SHUFFLE_MOVE", "Counts number of 128 bit SIMD integer shuffle and move operations."},
	{"LOAD_DISPATCH.RS", "Counts number of loads dispatched from the Reservation Station that bypass the Memory Order Buffer."},
	{"LOAD_DISPATCH.RS_DELAYED", "Counts the number of delayed RS dispatches at the stage latch. If an RS dispatch can not bypass to LB, it has another chance to dispatch from the one-cycle delayed staging latch before it is written into the LB."},
	{"LOAD_DISPATCH.MOB", "Counts the number of loads dispatched from the Reservation Station to the Memory Order Buffer."},
	{"LOAD_DISPATCH.ANY", "Counts all loads dispatched from the Reservation Station."},
	{"ARITH.CYCLES_DIV_BUSY", "Counts the number of cycles the divider is busy executing divide or square root operations. The divide can be integer, X87 or Streaming SIMD Extensions (SSE). The square root operation can be either X87 or SSE."},
	{"ARITH.MUL", "Counts the number of multiply operations executed. This includes integer as well as floating point multiply operations but excludes DPPS mul and MPSAD."},
	{"INST_QUEUE_WRITES", "Counts the number of instructions written into the instruction queue every cycle."},
	{"INST_DECODED.DEC0", "Counts number of instructions that require  decoder 0 to be decoded.  Usually, this means that the instruction maps to more than 1 uop"},
	{"TWO_UOP_INSTS_DECODED", "An instruction that generates two uops was decoded."},
	{"HW_INT.RCV", "Number of interrupts received."},
	{"HW_INT.CYCLES_MASKED", "Number of cycles interrupts are masked."},
	{"HW_INT.CYCLES_PENDING_AND_MASKED", "Number of cycles interrupts are pending and masked."},
	{"INST_QUEUE_WRITE_CYCLES", "This event counts the number of cycles during which instructions are written to the instruction queue.  Dividing this counter by the number of instructions written to the instruction queue (INST_QUEUE_WRITES) yields the average number of instructions decoded each cycle. If this number is  less than four and the pipe stalls, this indicates that the decoder is failing to decode enough instructions per cycle to sustain the 4-wide pipeline. If SSE* instructions that are 6 bytes or longer arrive one after another, then front end throughput may limit execution speed. "},
	{"L2_RQSTS.LD_HIT", "Counts number of loads that hit the L2 cache. L2 loads include both L1D demand misses as well as L1D prefetches.  L2 loads can be rejected for various reasons.  Only non rejected loads are counted."},
	{"L2_RQSTS.LD_MISS", "Counts the number of loads that miss the L2 cache. L2 loads include both L1D demand misses as well as L1D prefetches."},
	{"L2_RQSTS.LOADS", "Counts all L2 load requests. L2 loads include both L1D demand misses as well as L1D prefetches."},
	{"L2_RQSTS.RFO_HIT", "Counts the number of store RFO requests that hit the L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches. Count includes WC memory requests, where the data is not fetched but the permission to write the line is required."},
	{"L2_RQSTS.RFO_MISS", "Counts the number of store RFO requests that miss the L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches."},
	{"L2_RQSTS.RFOS", "Counts all L2 store RFO requests. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches."},
	{"L2_RQSTS.IFETCH_HIT", "Counts number of instruction fetches that hit the L2 cache. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.IFETCH_MISS", "Counts number of instruction fetches that miss the L2 cache. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.IFETCHES", "Counts all instruction fetches. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.PREFETCH_HIT", "Counts L2 prefetch hits for both code and data."},
	{"L2_RQSTS.PREFETCH_MISS", "Counts L2 prefetch misses for both code and data."},
	{"L2_RQSTS.PREFETCHES", "Counts all L2 prefetches for both code and data."},
	{"L2_RQSTS.MISS", "Counts all L2 misses for both code and data."},
	{"L2_RQSTS.REFERENCES", "Counts all L2 requests for both code and data."},
	{"L2_DATA_RQSTS.DEMAND.I_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.S_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the S (shared) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.E_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the E (exclusive) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.M_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the M (modified) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.MESI", "Counts all L2 data demand requests. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.PREFETCH.I_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss."},
	{"L2_DATA_RQSTS.PREFETCH.S_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the S (shared) state. A prefetch RFO will miss on an S state line, while a prefetch read will hit on an S state line."},
	{"L2_DATA_RQSTS.PREFETCH.E_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the E (exclusive) state."},
	{"L2_DATA_RQSTS.PREFETCH.M_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the M (modified) state."},
	{"L2_DATA_RQSTS.PREFETCH.MESI", "Counts all L2 prefetch requests."},
	{"L2_DATA_RQSTS.ANY", "Counts all L2 data requests."},
	{"L2_WRITE.RFO.I_STATE", "Counts number of L2 demand store RFO requests where the cache line to be loaded is in the I (invalid) state, i.e, a cache miss. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.S_STATE", "Counts number of L2 store RFO requests where the cache line to be loaded is in the S (shared) state. The L1D prefetcher does not issue a RFO prefetch,. This is a demand RFO request."},
	{"L2_WRITE.RFO.E_STATE", "Counts number of L2 store RFO requests where the cache line to be loaded is in the E (exclusive) state. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.M_STATE", "Counts number of L2 store RFO requests where the cache line to be loaded is in the M (modified) state. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.HIT", "Counts number of L2 store RFO requests where the cache line to be loaded is in either the S, E or M states. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.MESI", "Counts all L2 store RFO requests.The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.LOCK.I_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss."},
	{"L2_WRITE.LOCK.S_STATE", "Counts number of L2 lock RFO requests where the cache line to be loaded is in the S (shared) state."},
	{"L2_WRITE.LOCK.E_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the E (exclusive) state."},
	{"L2_WRITE.LOCK.M_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the M (modified) state."},
	{"L2_WRITE.LOCK.HIT", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in either the S, E, or M state."},
	{"L2_WRITE.LOCK.MESI", "Counts all L2 demand lock RFO requests."},
	{"L1D_WB_L2.I_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the I (invalid) state, i.e. a cache miss."},
	{"L1D_WB_L2.S_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the S state."},
	{"L1D_WB_L2.E_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the E (exclusive) state."},
	{"L1D_WB_L2.M_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the M (modified) state."},
	{"L1D_WB_L2.MESI", "Counts all L1 writebacks to the L2."},
	{"L3_LAT_CACHE.REFERENCE", "This event counts requests originating from the core that reference a cache line in the last level cache. The event count includes speculative traffic but excludes cache line fills due to a L2 hardware-prefetch. Because cache hierarchy, cache sizes and other implementation-specific characteristics; value comparison to estimate performance differences is not recommended."},
	{"L3_LAT_CACHE.MISS", "This event counts each cache miss condition for references to the last level cache. The event count may include speculative traffic but excludes cache line fills due to L2 hardware-prefetches. Because cache hierarchy, cache sizes and other implementation-specific characteristics; value comparison to estimate performance differences is not recommended."},
	{"CPU_CLK_UNHALTED.THREAD_P", "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling."},
	{"CPU_CLK_UNHALTED.REF_P", "Increments at the frequency of TSC when not halted."},
	{"UOPS_DECODED.DEC0", "Counts micro-ops decoded by decoder 0."},
	{"L1D_CACHE_LD.I_STATE", "Counts L1 data cache read requests where the cache line to be loaded is in the I (invalid) state, i.e. the read request missed the cache. Counter 0, 1 only."},
	{"L1D_CACHE_LD.S_STATE", "Counts L1 data cache read requests where the cache line to be loaded is in the S (shared) state. Counter 0, 1 only."},
	{"L1D_CACHE_LD.E_STATE", "Counts L1 data cache read requests where the cache line to be loaded is in the E (exclusive) state. Counter 0, 1 only."},
	{"L1D_CACHE_LD.M_STATE", "Counts L1 data cache read requests where the cache line to be loaded is in the M (modified) state. Counter 0, 1 only."},
	{"L1D_CACHE_LD.MESI", "Counts L1 data cache read requests. Counter 0, 1 only."},
	{"L1D_CACHE_ST.I_STATE", "Counts L1 data cache store RFO requests where the cache line to be loaded is in the I state. Counter 0, 1 only."},
	{"L1D_CACHE_ST.S_STATE", "Counts L1 data cache store RFO requests where the cache line to be loaded is in the S (shared) state. Counter 0, 1 only."},
	{"L1D_CACHE_ST.E_STATE", "Counts L1 data cache store RFO requests where the cache line to be loaded is in the E (exclusive) state. Counter 0, 1 only."},
	{"L1D_CACHE_ST.M_STATE", "Counts L1 data cache store RFO requests where cache line to be loaded is in the M (modified) state. Counter 0, 1 only."},
	{"L1D_CACHE_ST.MESI", "Counts L1 data cache store RFO requests. Counter 0, 1 only."},
	{"L1D_CACHE_LOCK.HIT", "Counts retired load locks that hit in the L1 data cache or hit in an already allocated fill buffer.   The lock portion of the load lock transaction must hit in the L1D. The initial load will pull the lock into the L1 data cache. Counter 0, 1 only."},
	{"L1D_CACHE_LOCK.S_STATE", "Counts L1 data cache retired load locks that hit the target cache line in the shared state. Counter 0, 1 only."},
	{"L1D_CACHE_LOCK.E_STATE", "Counts L1 data cache retired load locks that hit the target cache line in the exclusive state. Counter 0, 1 only."},
	{"L1D_CACHE_LOCK.M_STATE", "Counts L1 data cache retired load locks that hit the target cache line in the modified state. Counter 0, 1 only."},
	{"L1D_ALL_REF.ANY", "Counts all references (uncached, speculated and retired) to the L1 data cache, including all loads and stores with any memory types. The event counts memory accesses only when they are actually performed. For example, a load blocked by unknown store address and later performed is only counted once. The event does not include non- memory accesses, such as I/O accesses. Counter 0, 1 only."},
	{"L1D_ALL_REF.CACHEABLE", "Counts all data reads and writes (speculated and retired) from cacheable memory, including locked operations. Counter 0, 1 only."},
	{"L1D_PEND_MISS.LOAD_BUFFERS_FULL", "Counts cycles of L1 data cache load fill buffers full. Counter 0, 1 only."},
	{"DTLB_MISSES.ANY", "Counts the number of misses in the STLB which causes a page walk."},
	{"DTLB_MISSES.WALK_COMPLETED", "Counts number of misses in the STLB which resulted in a completed page walk."},
	{"DTLB_MISSES.STLB_HIT", "Counts the number of DTLB first level misses that hit in the second level TLB.  This event is only relevant if the core contains multiple DTLB levels."},
	{"DTLB_MISSES.PDE_MISS", "Number of DTLB cache misses where the low part of the linear to physical address translation was missed."},
	{"DTLB_MISSES.PDP_MISS", "Number of DTLB misses where the high part of the linear to physical address translation was missed."},
	{"DTLB_MISSES.LARGE_WALK_COMPLETED", "Counts number of completed large page walks due to misses in the STLB."},
	{"SSE_MEM_EXEC.NTA", "Counts number of SSE NTA prefetch/weakly-ordered instructions which missed the L1 data cache."},
	{"SSE_MEM_EXEC.STREAMING_STORES", "Counts number of SSE non- temporal stores."},
	{"LOAD_HIT_PRE", "Counts load operations sent to the L1 data cache while a previous SSE prefetch instruction to the same cache line has started prefetching but has not yet finished."},
	{"SFENCE_CYCLES", "Counts store fence cycles."},
	{"L1D_PREFETCH.REQUESTS", "Counts number of hardware prefetch requests dispatched out of the prefetch FIFO."},
	{"L1D_PREFETCH.MISS", "Counts number of hardware prefetch requests that miss the L1D.  There are two prefetchers in the L1D.  A streamer, which predicts lines sequentially after this one should be fetched, and the IP prefetcher that remembers access patterns for the current instruction.  The streamer prefetcher stops on an L1D hit,  while the IP prefetcher does not."},
	{"L1D_PREFETCH.TRIGGERS", "Counts number of prefetch requests triggered by the Finite State Machine and pushed into the prefetch FIFO. Some of the prefetch requests are dropped due to overwrites or competition between the IP index prefetcher and streamer prefetcher.  The prefetch FIFO contains 4 entries."},
	{"EPT.EPDE_MISS", "Counts Extended Page Directory Entry misses.  The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches."},
	{"EPT.EPDPE_HIT", "Counts Extended Page Directory Pointer Entry hits."},
	{"EPT.EPDPE_MISS", "Counts Extended Page Directory Pointer Entry misses."},
	{"L1D.REPL", "Counts the number of lines brought into the L1 data cache. Counter 0, 1 only."},
	{"L1D.M_REPL", "Counts the number of modified lines brought into the L1 data cache. Counter 0, 1 only."},
	{"L1D.M_EVICT", "Counts the number of modified lines evicted from the L1 data cache  due to replacement. Counter 0, 1 only."},
	{"L1D.M_SNOOP_EVICT", "Counts the number of modified lines evicted from the L1 data cache due to snoop HITM intervention. Counter 0, 1 only."},
	{"L1D_CACHE_PREFETCH_LOCK_FB_HIT", "Counts the number of cacheable load lock speculated instructions accepted into the fill buffer."},
	{"L1D_CACHE_LOCK_FB_HIT", "Counts the number of cacheable load lock speculated or retired instructions accepted into the fill buffer."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA", "Counts weighted cycles of offcore demand data read requests. Does not include L2 prefetch requests."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE", "Counts weighted cycles of offcore demand code read requests. Does not include L2 prefetch requests."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO", "Counts weighted cycles of offcore demand RFO requests. Does not include L2 prefetch requests."},
	{"OFFCORE_REQUESTS_OUTSTANDING.ANY.READ", "Counts weighted cycles of offcore read requests of any kind. Include L2 prefetch requests."},
	{"CACHE_LOCK_CYCLES.L1D_L2", "Cycle count during which the L1D and L2 are locked.  A lock is asserted when there is a locked memory access, due to uncacheable memory, a locked operation that spans two cache lines, or a page walk from an uncacheable page table. Counter 0, 1 only.L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such accesses."},
	{"CACHE_LOCK_CYCLES.L1D", "Counts the number of cycles that cacheline in the L1 data cache unit is locked. Counter 0, 1 only."},
	{"IO_TRANSACTIONS", "Counts the number of completed I/O transactions."},
	{"L1I.HITS", "Counts all instruction fetches that hit the L1 instruction cache."},
	{"L1I.MISSES", "Counts all instruction fetches that miss the L1I cache. This includes instruction cache misses,  streaming buffer misses, victim cache misses and uncacheable fetches.  An instruction fetch miss is counted only once and not once for every cycle it is outstanding."},
	{"L1I.READS", "Counts all instruction fetches, including uncacheable fetches that bypass the L1I."},
	{"L1I.CYCLES_STALLED", "Cycle counts for which an instruction fetch stalls due to a L1I cache miss, ITLB miss or ITLB fault."},
	{"IFU_IVC.FULL", "Instruction Fetch unit victim cache full."},
	{"IFU_IVC.L1I_EVICTION", "L1 Instruction cache evictions."},
	{"LARGE_ITLB.HIT", "Counts number of large ITLB hits."},
	{"L1I_OPPORTUNISTIC_HITS", "Opportunistic hits in streaming."},
	{"ITLB_MISSES.ANY", "Counts the number of misses in all levels of the ITLB which causes a page walk."},
	{"ITLB_MISSES.WALK_COMPLETED", "Counts number of misses in all levels of the ITLB which resulted in a completed page walk."},
	{"ITLB_MISSES.WALK_CYCLES", "Counts ITLB miss page walk cycles."},
	{"ITLB_MISSES.STLB_HIT", "Counts the number of ITLB misses that hit in the second level TLB."},
	{"ITLB_MISSES.PDE_MISS", "Number of ITLB misses where the low part of the linear to physical address translation was missed."},
	{"ITLB_MISSES.PDP_MISS", "Number of ITLB misses where the high part of the linear to physical address translation was missed."},
	{"ITLB_MISSES.LARGE_WALK_COMPLETED", "Counts number of completed large page walks due to misses in the STLB."},
	{"ILD_STALL.ANY", ""},
	{"ILD_STALL.IQ_FULL", ""},
	{"ILD_STALL.LCP", "Cycles Instruction Length Decoder stalls due to length changing prefixes: 66, 67 or REX.W (for EM64T) instructions which change the length of the decoded instruction."},
	{"ILD_STALL.MRU", ""},
	{"ILD_STALL.REGEN", ""},
	{"BR_INST_EXEC.ANY", "Counts all near executed branches (not necessarily retired). This includes only instructions and not micro-op branches. Frequent branching is not necessarily a major performance issue. However frequent branch mispredictions may be a problem."},
	{"BR_INST_EXEC.COND", ""},
	{"BR_INST_EXEC.DIRECT", ""},
	{"BR_INST_EXEC.DIRECT_NEAR_CALL", ""},
	{"BR_INST_EXEC.INDIRECT_NEAR_CALL", ""},
	{"BR_INST_EXEC.INDIRECT_NON_CALL", ""},
	{"BR_INST_EXEC.NEAR_CALLS", ""},
	{"BR_INST_EXEC.NON_CALLS", ""},
	{"BR_INST_EXEC.RETURN_NEAR", ""},
	{"BR_INST_EXEC.TAKEN", ""},
	{"BR_MISP_EXEC.COND", "Counts the number of mispredicted conditional near branch instructions executed, but not necessarily retired."},
	{"BR_MISP_EXEC.DIRECT", "Counts mispredicted macro unconditional near branch instructions, excluding calls and indirect branches (should always be 0)."},
	{"BR_MISP_EXEC.INDIRECT_NON_CALL", "Counts the number of executed mispredicted indirect near branch instructions that are not calls."},
	{"BR_MISP_EXEC.NON_CALLS", "Counts mispredicted non call near branches executed,  but not necessarily retired."},
	{"BR_MISP_EXEC.RETURN_NEAR", "Counts mispredicted indirect branches that have a rear return mnemonic."},
	{"BR_MISP_EXEC.DIRECT_NEAR_CALL", "Counts mispredicted non-indirect near calls executed, (should always be 0)."},
	{"BR_MISP_EXEC.INDIRECT_NEAR_CALL", "Counts mispredicted indirect near calls exeucted, including both register and memory indirect."},
	{"BR_MISP_EXEC.NEAR_CALLS", "Counts all mispredicted near call branches executed, but not necessarily retired."},
	{"BR_MISP_EXEC.TAKEN", "Counts executed mispredicted near branches that are taken, but not necessarily retired."},
	{"BR_MISP_EXEC.ANY", "Counts the number of mispredicted near branch instructions that were executed, but not necessarily retired."},
	{"RESOURCE_STALLS.ANY", "Counts the number of Allocator resource related stalls. Includes register renaming buffer entries, memory buffer entries. In addition to resource related stalls, this event counts some other events. Includes stalls arising during branch misprediction recovery, such as if retirement of the mispredicted branch is delayed and stalls arising while store buffer is draining from synchronizing operations. Does not include stalls due to SuperQ (off core) queue full, too many cache misses, etc."},
	{"RESOURCE_STALLS.LOAD", "Counts the cycles of stall due to lack of load buffer for load operation."},
	{"RESOURCE_STALLS.RS_FULL", "This event counts the number of cycles when the number of instructions in the pipeline waiting for execution reaches the limit the processor can handle. A high count of this event indicates that there are long latency operations in the pipe (possibly load and store operations that miss the L2 cache, or instructions dependent upon instructions further down the pipeline that have yet to retire. When RS is full, new instructions can not enter the reservation station and start execution."},
	{"RESOURCE_STALLS.STORE", "This event counts the number of cycles that a resource related stall will occur due to the number of store instructions reaching the limit of the pipeline, (i.e. all store buffers are used). The stall ends when a store instruction commits its data to the cache or memory."},
	{"RESOURCE_STALLS.ROB_FULL", "Counts the cycles of stall due to re- order buffer full."},
	{"RESOURCE_STALLS.FPCW", "Counts the number of cycles while execution was stalled due to writing the floating-point unit (FPU) control word."},
	{"RESOURCE_STALLS.MXCSR", "Stalls due to the MXCSR register rename occurring to close to a previous MXCSR rename.  The MXCSR provides control and status for the MMX registers."},
	{"RESOURCE_STALLS.OTHER", "Counts the number of cycles while execution was stalled due to other resource issues."},
	{"MACRO_INSTS.FUSIONS_DECODED", "Counts the number of instructions decoded that are macro-fused but not necessarily executed or retired."},
	{"BACLEAR_FORCE_IQ", "Counts number of times a BACLEAR was forced by the Instruction Queue.  The IQ is also responsible for providing conditional branch prediciton direction based on a static scheme and dynamic data provided by the L2 Branch Prediction Unit. If the conditional branch target is not found in the Target Array and the IQ predicts that the branch is taken, then the IQ will force the Branch Address Calculator to issue a BACLEAR. Each BACLEAR asserted by the BAC generates approximately an 8 cycle bubble in the instruction fetch pipeline."},
	{"LSD.UOPS", "Counts the number of micro-ops delivered by loop stream detector Use cmask=1 and invert to count cycles."},
	{"ITLB.FLUSH", "Counts the number of ITLB flushes."},
	{"OFFCORE_REQUESTS.DEMAND.READ_DATA", "Counts number of offcore demand data read requests.  Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.DEMAND.READ_CODE", "Counts number of offcore demand code read requests.  Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.DEMAND.RFO", "Counts number of offcore demand RFO requests. Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.ANY.READ", "Counts number of offcore read requests. Includes L2 prefetch requests."},
	{"OFFCORE_REQUESTS.ANY.RFO", "Counts number of offcore RFO requests. Includes L2 prefetch requests."},
	{"OFFCORE_REQUESTS.UNCACHED_MEM", "Counts number of offcore uncached memory requests."},
	{"OFFCORE_REQUESTS.L1D_WRITEBACK", "Counts number of L1D writebacks to the uncore."},
	{"OFFCORE_REQUESTS.ANY", "Counts all offcore requests."},
	{"UOPS_EXECUTED.PORT0", "Counts number of Uops executed that were issued on port 0.  Port 0 handles integer arithmetic, SIMD and FP add Uops."},
	{"UOPS_EXECUTED.PORT1", "Counts number of Uops executed that were issued on port 1. Port 1 handles integer arithmetic, SIMD, integer shift, FP multiply and FP divide Uops."},
	{"UOPS_EXECUTED.PORT2_CORE", "Counts number of Uops executed that were issued on port 2.  Port 2 handles the load Uops. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT3_CORE", "Counts number of Uops executed that were issued on port 3. Port 3 handles store Uops.  This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT4_CORE", "Counts number of Uops executed that where issued on port  4.  Port 4 handles the value to be stored for the store Uops issued on port 3. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT5", "Counts number of Uops executed that where issued on port 5."},
	{"UOPS_EXECUTED.CORE_ACTIVE_CYCLES", "Counts cycles when the Uops are executing."},
	{"UOPS_EXECUTED.PORT015", "Counts number of Uops executed that where issued on port  0, 1, or 5. use cmask=1, invert=1 to count stall cycles."},
	{"UOPS_EXECUTED.PORT234", "Counts number of Uops executed that where issued on port 2, 3, or 4."},
	{"OFFCORE_REQUESTS_SQ_FULL", "Counts number of cycles the SQ is full to handle off-core requests."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.DATA", "Counts weighted cycles of snoopq requests for data. Counter 0 only Use cmask=1 to count cycles not empty."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE", "Counts weighted cycles of snoopq invalidate requests. Counter 0 only Use cmask=1 to count cycles not empty."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.CODE", "Counts weighted cycles of snoopq requests for code. Counter 0 only Use cmask=1 to count cycles not empty."},
	{"OFF_CORE_RESPONSE_0", "see Section 19.17.1.3, ?Off-core Response Performance Monitoring in the Processor Core?"},
	{"SNOOP_RESPONSE.HIT", "Counts HIT snoop response sent by this thread in response to a snoop request."},
	{"SNOOP_RESPONSE.HITE", "Counts HIT E snoop response sent by this thread in response to a snoop request."},
	{"SNOOP_RESPONSE.HITM", "Counts HIT M snoop response sent by this thread in response to a snoop request."},
	{"PIC_ACCESSES.TPR_READS", "Counts number of TPR reads."},
	{"PIC_ACCESSES.TPR_WRITES", "Counts number of TPR writes."},
	{"INST_RETIRED.ANY_P", "See Table A-1 Notes: INST_RETIRED.ANY is counted by a designated fixed counter. INST_RETIRED.ANY_P is counted by a programmable counter and is an architectural performance event.  Event is supported if CPUID.A.EBX[1] = 0. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions."},
	{"INST_RETIRED.X87", "Counts the number of floating point computational operations retired: floating point computational operations executed by the assist handler and sub-operations of complex floating point instructions like transcendental instructions."},
	{"UOPS_RETIRED.ANY", "Counts the number of micro-ops retired, (macro-fused=1, micro- fused=2, others=1; maximum count of 8 per cycle). Most instructions are composed of one or two micro- ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. Use cmask=1 and invert to count active cycles or stalled cycles."},
	{"UOPS_RETIRED.RETIRE_SLOTS", "Counts the number of retirement slots used each cycle."},
	{"UOPS_RETIRED.MACRO_FUSED", "Counts number of macro-fused uops retired."},
	{"MACHINE_CLEARS.CYCLES", "Counts the cycles machine clear is asserted."},
	{"MACHINE_CLEARS.MEM_ORDER", "Counts the number of machine clears due to memory order conflicts."},
	{"MACHINE_CLEARS.SMC", "Counts the number of times that a program writes to a code section. Self-modifying code causes a sever penalty in all Intel 64 and IA-32 processors.  The modified cache line is written back to the L2 and L3caches."},
	{"MACHINE_CLEARS.FUSION_ASSIST", "Counts the number of macro-fusion assists."},
	{"BR_INST_RETIRED.ALL_BRANCHES", "See Table A-1."},
	{"BR_INST_RETIRED.CONDITIONAL", "Counts the number of conditional branch instructions retired."},
	{"BR_INST_RETIRED.NEAR_CALL", "Counts the number of direct & indirect near unconditional calls retired."},
	{"BR_MISP_RETIRED.ALL_BRANCHES", "See Table A-1."},
	{"BR_MISP_RETIRED.NEAR_CALL", "Counts mispredicted direct & indirect near unconditional retired calls."},
	{"SSEX_UOPS_RETIRED.PACKED_SINGLE", "Counts SIMD packed single- precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.SCALAR_SINGLE", "Counts SIMD calar single-precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.PACKED_DOUBLE", "Counts SIMD packed double- precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.SCALAR_DOUBLE", "Counts SIMD scalar double-precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.VECTOR_INTEGER", "Counts 128-bit SIMD vector integer Uops retired."},
	{"ITLB_MISS_RETIRED", "Counts the number of retired instructions that missed the ITLB when the instruction was fetched."},
	{"MEM_LOAD_RETIRED.L1D_HIT", "Counts number of retired loads that hit the L1 data cache."},
	{"MEM_LOAD_RETIRED.L2_HIT", "Counts number of retired loads that hit the L2 data cache."},
	{"MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM", "Counts number of retired loads that hit in a sibling core's L2 (on die core).  Since the L3 is inclusive of all cores on the package, this is an L3 hit. This counts both clean or modified hits."},
	{"MEM_LOAD_RETIRED.HIT_LFB", "Counts number of retired loads that miss the L1D and the address is located in an allocated line fill buffer and will soon be committed to cache.  This is counting secondary L1D misses."},
	{"MEM_LOAD_RETIRED.DTLB_MISS", "Counts the number of retired loads that missed the DTLB. The DTLB miss is not counted if the load operation causes a fault.  This event counts loads from cacheable memory only. The event does not count loads by software prefetches. Counts both primary and secondary misses to the TLB."},
	{"MEM_LOAD_RETIRED.L3_MISS", "Counts number of retired loads that miss the L3 cache."},
	{"MEM_LOAD_RETIRED.L3_UNSHARED_HIT", "Couns number of retired loads that hit their own, unshared lines in the L3 cache."},
	{"FP_MMX_TRANS.TO_FP", "Counts the first floating-point instruction following any MMX instruction. You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"FP_MMX_TRANS.TO_MMX", "Counts the first MMX instruction following a floating-point instruction. You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"FP_MMX_TRANS.ANY", "Counts all transitions from floating point to MMX instructions and from MMX instructions to floating point instructions.  You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"MACRO_INSTS.DECODED", "Counts the number of instructions decoded, (but not necessarily executed or retired)."},
	{"UOPS_DECODED.MS", "Counts the number of Uops decoded by the Microcode Sequencer, MS.  The MS delivers uops when the instruction is more than 4 uops long or a microcode assist is occurring."},
	{"UOPS_DECODED.ESP_FOLDING", "Counts number of stack pointer (ESP) instructions decoded: push , pop , call , ret, etc.  ESP instructions do not generate a Uop to increment or decrement ESP.  Instead, they update an ESP_Offset register that keeps track of the delta to the current value of the ESP register."},
	{"UOPS_DECODED.ESP_SYNC", "Counts number of stack pointer (ESP) sync operations where an ESP instruction is corrected  by adding the ESP offset register to the current value of the ESP register."},
	{"RAT_STALLS.FLAGS", "Counts the number of cycles during which execution stalled due to several reasons, one of which is a partial flag register stall. A partial register stall may occur when two conditions are met: 1) an instruction modifies some, but not all, of the flags in the flag register and 2) the next instruction, which depends on flags, depends on flags that were not modified by this instruction."},
	{"RAT_STALLS.REGISTERS", "This event counts the number of cycles instruction execution latency became longer than the defined latency because the instruction used a register that was partially written by previous instruction."},
	{"RAT_STALLS.ROB_READ_PORT", "Counts the number of cycles when ROB read port stalls occurred, which did not allow new micro-ops to enter the out-of-order pipeline. Note that, at this stage in the pipeline, additional stalls may occur at the same cycle and prevent the stalled micro-ops from entering the pipe. In such a case, micro-ops retry entering the execution pipe in the next cycle and the ROB-read port stall is counted again."},
	{"RAT_STALLS.SCOREBOARD", "Counts the cycles where we stall due to microarchitecturally required serialization. Microcode scoreboarding stalls."},
	{"RAT_STALLS.ANY", "Counts all Register Allocation Table stall cycles due to:  Cycles when ROB read port stalls occurred, which did not allow new micro-ops to enter the execution pipe.  Cycles when partial register stalls occurred  Cycles when flag stalls occurred  Cycles floating-point unit (FPU) status word stalls occurred. To count each of these conditions separately use the events: RAT_STALLS.ROB_READ_PORT, RAT_STALLS.PARTIAL, RAT_STALLS.FLAGS, and RAT_STALLS.FPSW."},
	{"SEG_RENAME_STALLS", "Counts the number of stall cycles due to the lack of renaming resources for the ES, DS, FS, and GS segment registers. If a segment is renamed but not retired and a second update to the same segment occurs, a stall occurs in the front-end of the pipeline until the renamed segment retires."},
	{"ES_REG_RENAMES", "Counts the number of times the ES segment register is renamed."},
	{"UOP_UNFUSION", "Counts unfusion events due to floating point exception to a fused uop."},
	{"BR_INST_DECODED", "Counts the number of branch instructions decoded."},
	{"BOGUS_BR", "Counts the number of bogus branches."},
	{"BPU_MISSED_CALL_RET", "Counts number of times the Branch Prediciton Unit missed predicting a call or return branch."},
	{"L2_HW_PREFETCH.DATA_TRIGGER", "Count L2 HW data prefetcher triggered."},
	{"L2_HW_PREFETCH.CODE_TRIGGER", "Count L2 HW code prefetcher triggered."},
	{"L2_HW_PREFETCH.DCA_TRIGGER", "Count L2 HW DCA prefetcher triggered."},
	{"L2_HW_PREFETCH.KICK_START", "Count L2 HW prefetcher kick started."},
	{"SQ_MISC.PROMOTION", "Counts the number of L2 secondary misses that hit the Super Queue."},
	{"SQ_MISC.PROMOTION_POST_GO", "Counts the number of L2 secondary misses during the Super Queue filling L2."},
	{"SQ_MISC.LRU_HINTS", "Counts number of Super Queue LRU hints sent to L3."},
	{"SQ_MISC.FILL_DROPPED", "Counts the number of SQ L2 fills dropped due to L2 busy."},
	{"SQ_MISC.SPLIT_LOCK", "Counts the number of SQ lock splits across a cache line."},
	{"SQ_FULL_STALL_CYCLES", "Counts cycles the Super Queue is full.  Neither of the threads on this core will be able to access the uncore."},
	{"FP_ASSIST.ALL", "Counts the number of floating point operations executed that required micro-code assist intervention. Assists are required in the following cases: SSE instructions, (Denormal input when the DAZ flag is off or Underflow result when the FTZ flag is off): x87 instructions, (NaN or denormal are loaded to a register or used as input from memory, Division by 0 or Underflow output)."},
	{"FP_ASSIST.OUTPUT", "Counts number of floating point micro-code assist when the output value (destination register) is invalid."},
	{"FP_ASSIST.INPUT", "Counts number of floating point micro-code assist when the input value (one of the source operands to an FP instruction) is invalid."},
	{"SEGMENT_REG_LOADS", "Counts number of segment register loads."},
	{"SIMD_INT_64.PACKED_MPY", "Counts number of SID integer 64 bit packed multiply operations."},
	{"SIMD_INT_64.PACKED_SHIFT", "Counts number of SID integer 64 bit packed shift operations."},
	{"SIMD_INT_64.PACK", "Counts number of SID integer 64 bit pack operations."},
	{"SIMD_INT_64.UNPACK", "Counts number of SID integer 64 bit unpack operations."},
	{"SIMD_INT_64.PACKED_LOGICAL", "Counts number of SID integer 64 bit logical operations."},
	{"SIMD_INT_64.PACKED_ARITH", "Counts number of SID integer 64 bit arithmetic operations."},
	{"SIMD_INT_64.SHUFFLE_MOVE", "Counts number of SID integer 64 bit shift or move operations."},
	{"INSTR_RETIRED_ANY", "Instructions retired (IAF)"},
	{"CPU_CLK_UNHALTED_CORE", "Unhalted core cycles (IAF)"},
	{"CPU_CLK_UNHALTED_REF", "Unhalted reference cycles (IAF)"},
	{"GQ_CYCLES_FULL.READ_TRACKER", "Uncore cycles Global Queue read tracker is full."},
	{"GQ_CYCLES_FULL.WRITE_TRACKER", "Uncore cycles Global Queue write tracker is full."},
	{"GQ_CYCLES_FULL.PEER_PROBE_TRACKER", "Uncore cycles Global Queue peer probe tracker is full. The peer probe tracker queue tracks snoops from the IOH and remote sockets."},
	{"GQ_CYCLES_NOT_EMPTY.READ_TRACKER", "Uncore cycles were Global Queue read tracker has at least one valid entry."},
	{"GQ_CYCLES_NOT_EMPTY.WRITE_TRACKER", "Uncore cycles were Global Queue write tracker has at least one valid entry."},
	{"GQ_CYCLES_NOT_EMPTY.PEER_PROBE_TRACKER", "Uncore cycles were Global Queue peer probe tracker has at least one valid entry. The peer probe tracker queue tracks IOH and remote socket snoops."},
	{"GQ_ALLOC.READ_TRACKER", "Counts the number of tread tracker allo- cate to deallocate entries. The GQ read tracker allocate to deal- locate occupancy count is divided by the count to obtain the average read tracker latency."},
	{"GQ_ALLOC.RT_L3_MISS", "Counts the number GQ read tracker entries for which a full cache line read has missed the L3. The GQ read tracker L3 miss to fill occupancy count is divided by this count to obtain the average cache line read L3 miss latency.  The latency represents the time after which the L3 has determined that the cache line has missed. The time between a GQ read tracker allocation and the L3 determining that the cache line has missed is the average L3 hit latency.  The total L3 cache line read miss latency is the hit latency + L3 miss latency."},
	{"GQ_ALLOC.RT_TO_L3_RESP", "Counts the number of GQ read tracker entries that are allocated in the read tracker queue that hit or miss the L3. The GQ read tracker L3 hit occupancy count is divided by this count to obtain the average L3 hit latency."},
	{"GQ_ALLOC.RT_TO_RTID_ACQUIRED", "Counts the number of GQ read tracker entries that are allocated in the read tracker, have missed in the L3 and have not acquired a Request Transaction ID.  The GQ read tracker L3 miss to RTID acquired occupancy count is divided by this count to obtain the average latency for a read L3 miss to acquire an RTID."},
	{"GQ_ALLOC.WT_TO_RTID_ACQUIRED", "Counts the number of GQ write tracker entries that are allocated in the write tracker, have missed in the L3 and have not acquired a Request Transaction ID.  The GQ write tracker L3 miss to RTID occupancy count is divided by this count to obtain the average latency for a write L3 miss to acquire an RTID."},
	{"GQ_ALLOC.WRITE_TRACKER", "Counts the number of GQ write tracker entries that are allocated in the write tracker queue that miss the L3. The GQ write tracker occupancy count is divided by the this count to obtain the average L3 write miss latency."},
	{"GQ_ALLOC.PEER_PROBE_TRACKER", "Counts the number of GQ peer probe tracker (snoop) entries that are allocated in the peer probe tracker queue that miss the L3. The GQ peer probe occupancy count is divided by this count to obtain the average L3 peer probe miss latency."},
	{"GQ_DATA.FROM_QPI", "Cycles Global Queue Quickpath Interface input data port is busy importing data from the Quickpath Inter- face. Each cycle the input port can transfer 8 or 16 bytes of data."},
	{"GQ_DATA.FROM_QMC", "Cycles Global Queue Quickpath Memory Interface input data port is busy importing data from the Quick- path Memory Interface. Each cycle the input port can transfer 8 or 16 bytes of data."},
	{"GQ_DATA.FROM_L3", "Cycles GQ L3 input data port is busy importing data from the Last Level Cache. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.FROM_CORES_02", "Cycles GQ Core 0 and 2 input data port is busy importing data from processor cores 0 and 2. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.FROM_CORES_13", "Cycles GQ Core 1 and 3 input data port is busy importing data from processor cores 1 and 3. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_QPI_QMC", "Cycles GQ QPI and QMC output data port is busy sending data to the Quickpath Interface or Quickpath Memory Interface. Each cycle the output port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_L3", "Cycles GQ L3 output data port is busy sending data to the Last Level Cache.  Each cycle the output port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_CORES", "Cycles GQ Core output data port is busy sending data to the Cores. Each cycle the output port can trans- fer 32 bytes of data."},
	{"SNP_RESP_TO_LOCAL_HOME.I_STATE", "Number of snoop responses to the local home that L3 does not have the referenced cache line."},
	{"SNP_RESP_TO_LOCAL_HOME.S_STATE", "Number of snoop responses to the local home that L3 has the referenced line cached in the S state."},
	{"SNP_RESP_TO_LOCAL_HOME.FWD_S_STATE", "Number of responses to code or data read snoops to the local home that the L3 has the referenced cache line in the E state. The L3 cache line state is changed to the S state and the line is forwarded to the local home in the S state."},
	{"SNP_RESP_TO_LOCAL_HOME.FWD_I_STATE", "Number of responses to read invalidate snoops to the local home that the L3 has the referenced cache line in the M state. The L3 cache line state is invalidated and the line is forwarded to the local home in the M state."},
	{"SNP_RESP_TO_LOCAL_HOME.CONFLICT", "Number of conflict snoop responses sent to the local home."},
	{"SNP_RESP_TO_LOCAL_HOME.WB", "Number of responses to code or data read snoops to the local home that the L3 has the referenced line cached in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.I_STATE", "Number of snoop responses to a remote home that L3 does not have the referenced cache line."},
	{"SNP_RESP_TO_REMOTE_HOME.S_STATE", "Number of snoop responses to a remote home that L3 has the referenced line cached in the S state."},
	{"SNP_RESP_TO_REMOTE_HOME.FWD_S_STATE", "Number of responses to code or data read snoops to a remote home that the L3 has the referenced cache line in the E state. The L3 cache line state is changed to the S state and the line is forwarded to the remote home in the S state."},
	{"SNP_RESP_TO_REMOTE_HOME.FWD_I_STATE", "Number of responses to read invalidate snoops to a remote home that the L3 has the referenced cache line in the M state. The L3 cache line state is invalidated and the line is forwarded to the remote home in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.CONFLICT", "Number of conflict snoop responses sent to the local home."},
	{"SNP_RESP_TO_REMOTE_HOME.WB", "Number of responses to code or data read snoops to a remote home that the L3 has the referenced line cached in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.HITM", "Number of HITM snoop responses to a remote home."},
	{"L3_HITS.READ", "Number of code read, data read and RFO requests that hit in the L3."},
	{"L3_HITS.WRITE", "Number of writeback requests that hit in the L3. Writebacks from the cores will always result in L3 hits due to the inclusive property of the L3."},
	{"L3_HITS.PROBE", "Number of snoops from IOH or remote sock- ets that hit in the L3."},
	{"L3_HITS.ANY", "Number of reads and writes that hit the L3."},
	{"L3_MISS.READ", "Number of code read, data read and RFO requests that miss the L3."},
	{"L3_MISS.WRITE", "Number of writeback requests that miss the L3. Should always be zero as writebacks from the cores will always result in L3 hits due to the inclusive property of the L3."},
	{"L3_MISS.PROBE", "Number of snoops from IOH or remote sock- ets that miss the L3."},
	{"L3_MISS.ANY", "Number of reads and writes that miss the L3."},
	{"L3_LINES_IN.M_STATE", "Counts the number of L3 lines allocated in M state. The only time a cache line is allocated in the M state is when the line was forwarded in M state is forwarded due to a Snoop Read Invalidate Own request."},
	{"L3_LINES_IN.E_STATE", "Counts the number of L3 lines allocated in E state."},
	{"L3_LINES_IN.S_STATE", "Counts the number of L3 lines allocated in S state."},
	{"L3_LINES_IN.F_STATE", "Counts the number of L3 lines allocated in F state."},
	{"L3_LINES_IN.ANY", "Counts the number of L3 lines allocated in any state."},
	{"L3_LINES_OUT.M_STATE", "Counts the number of L3 lines victimized that were in the M state. When the victim cache line is in M state, the line is written to its home cache agent which can be either local or remote."},
	{"L3_LINES_OUT.E_STATE", "Counts the number of L3 lines victimized that were in the E state."},
	{"L3_LINES_OUT.S_STATE", "Counts the number of L3 lines victimized that were in the S state."},
	{"L3_LINES_OUT.I_STATE", "Counts the number of L3 lines victimized that were in the I state."},
	{"L3_LINES_OUT.F_STATE", "Counts the number of L3 lines victimized that were in the F state."},
	{"L3_LINES_OUT.ANY", "Counts the number of L3 lines victimized in any state."},
	{"QHL_REQUESTS.IOH_READS", "Counts number of Quickpath Home Logic read requests from the IOH."},
	{"QHL_REQUESTS.IOH_WRITES", "Counts number of Quickpath Home Logic write requests from the IOH."},
	{"QHL_REQUESTS.REMOTE_READS", "Counts number of Quickpath Home Logic read requests from a remote socket."},
	{"QHL_REQUESTS.REMOTE_WRITES", "Counts number of Quickpath Home Logic write requests from a remote socket."},
	{"QHL_REQUESTS.LOCAL_READS", "Counts number of Quickpath Home Logic read requests from the local socket."},
	{"QHL_REQUESTS.LOCAL_WRITES", "Counts number of Quickpath Home Logic write requests from the local socket."},
	{"QHL_CYCLES_FULL.IOH", "Counts uclk cycles all entries in the Quickpath Home Logic IOH are full."},
	{"QHL_CYCLES_FULL.REMOTE", "Counts uclk cycles all entries in the Quickpath Home Logic remote tracker are full."},
	{"QHL_CYCLES_FULL.LOCAL", "Counts uclk cycles all entries in the Quickpath Home Logic local tracker are full."},
	{"QHL_CYCLES_NOT_EMPTY.IOH", "Counts uclk cycles all entries in the Quickpath Home Logic IOH is busy."},
	{"QHL_CYCLES_NOT_EMPTY.REMOTE", "Counts uclk cycles all entries in the Quickpath Home Logic remote tracker is busy."},
	{"QHL_CYCLES_NOT_EMPTY.LOCAL", "Counts uclk cycles all entries in the Quickpath Home Logic local tracker is busy."},
	{"QHL_OCCUPANCY.IOH", "QHL IOH tracker allocate to deallocate read occupancy."},
	{"QHL_OCCUPANCY.REMOTE", "QHL remote tracker allocate to deallocate read occupancy."},
	{"QHL_OCCUPANCY.LOCAL", "QHL local tracker allocate to deallocate read occupancy."},
	{"QHL_ADDRESS_CONFLICTS.2WAY", "Counts number of QHL Active Address Table (AAT) entries that saw a max of 2 conflicts. The AAT is a struc- ture that tracks requests that are in conflict.  The requests themselves are in the home tracker entries. The count is reported when an AAT entry deallocates."},
	{"QHL_ADDRESS_CONFLICTS.3WAY", "Counts number of QHL Active Address Table (AAT) entries that saw a max of 3 conflicts. The AAT is a struc- ture that tracks requests that are in conflict.  The requests themselves are in the home tracker entries. The count is reported when an AAT entry deallocates."},
	{"QHL_CONFLICT_CYCLES.IOH", "Counts cycles the Quickpath Home Logic IOH Tracker contains two or more requests with an address conflict. A max of 3 requests can be in conflict."},
	{"QHL_CONFLICT_CYCLES.REMOTE", "Counts cycles the Quickpath Home Logic Remote Tracker contains two or more requests with an address con- flict. A max of 3 requests can be in conflict."},
	{"QHL_CONFLICT_CYCLES.LOCAL", "Counts cycles the Quickpath Home Logic Local Tracker contains two or more requests with an address con- flict. A max of 3 requests can be in conflict."},
	{"QHL_TO_QMC_BYPASS", "Counts number or requests to the Quickpath Memory Controller that bypass the Quickpath Home Logic. All local accesses can be bypassed. For remote requests, only read requests can be bypassed."},
	{"QMC_NORMAL_FULL.READ.CH0", "Uncore cycles all the entries in the DRAM channel 0 medium or low priority queue are occupied with read requests."},
	{"QMC_NORMAL_FULL.READ.CH1", "Uncore cycles all the entries in the DRAM channel 1 medium or low priority queue are occupied with read requests."},
	{"QMC_NORMAL_FULL.READ.CH2", "Uncore cycles all the entries in the DRAM channel 2 medium or low priority queue are occupied with read requests."},
	{"QMC_NORMAL_FULL.WRITE.CH0", "Uncore cycles all the entries in the DRAM channel 0 medium or low priority queue are occupied with write requests."},
	{"QMC_NORMAL_FULL.WRITE.CH1", "Counts cycles all the entries in the DRAM channel 1 medium or low priority queue are occupied with write requests."},
	{"QMC_NORMAL_FULL.WRITE.CH2", "Uncore cycles all the entries in the DRAM channel 2 medium or low priority queue are occupied with write requests."},
	{"QMC_ISOC_FULL.READ.CH0", "Counts cycles all the entries in the DRAM channel 0 high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.READ.CH1", "Counts cycles all the entries in the DRAM channel 1high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.READ.CH2", "Counts cycles all the entries in the DRAM channel 2 high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.WRITE.CH0", "Counts cycles all the entries in the DRAM channel 0 high priority queue are occupied with isochronous write requests."},
	{"QMC_ISOC_FULL.WRITE.CH1", "Counts cycles all the entries in the DRAM channel 1 high priority queue are occupied with isochronous write requests."},
	{"QMC_ISOC_FULL.WRITE.CH2", "Counts cycles all the entries in the DRAM channel 2 high priority queue are occupied with isochronous write requests."},
	{"QMC_BUSY.READ.CH0", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding read request to DRAM channel 0."},
	{"QMC_BUSY.READ.CH1", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding read request to DRAM channel 1."},
	{"QMC_BUSY.READ.CH2", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding read request to DRAM channel 2."},
	{"QMC_BUSY.WRITE.CH0", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding write request to DRAM channel 0."},
	{"QMC_BUSY.WRITE.CH1", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding write request to DRAM channel 1."},
	{"QMC_BUSY.WRITE.CH2", "Counts cycles where Quickpath Memory Con- troller has at least 1 outstanding write request to DRAM channel 2."},
	{"QMC_OCCUPANCY.CH0", "IMC channel 0 normal read request occupancy."},
	{"QMC_OCCUPANCY.CH1", "IMC channel 1 normal read request occupancy."},
	{"QMC_OCCUPANCY.CH2", "IMC channel 2 normal read request occupancy."},
	{"QMC_ISSOC_OCCUPANCY.CH0", "IMC channel 0 issoc read request occupancy."},
	{"QMC_ISSOC_OCCUPANCY.CH1", "IMC channel 1 issoc read request occupancy."},
	{"QMC_ISSOC_OCCUPANCY.CH2", "IMC channel 2 issoc read request occu- pancy."},
	{"QMC_ISSOC_READS.ANY", "IMC issoc read request occupancy."},
	{"QMC_NORMAL_READS.CH0", "Counts the number of Quickpath Memory Con- troller channel 0 medium and low priority read requests. The QMC channel 0 normal read occupancy divided by this count provides the average QMC channel 0 read latency."},
	{"QMC_NORMAL_READS.CH1", "Counts the number of Quickpath Memory Con- troller channel 1 medium and low priority read requests. The QMC channel 1 normal read occupancy divided by this count provides the average QMC channel 1 read latency."},
	{"QMC_NORMAL_READS.CH2", "Counts the number of Quickpath Memory Con- troller channel 2 medium and low priority read requests. The QMC channel 2 normal read occupancy divided by this count provides the average QMC channel 2 read latency."},
	{"QMC_NORMAL_READS.ANY", "Counts the number of Quickpath Memory Con- troller medium and low priority read requests. The QMC normal read occupancy divided by this count provides the average QMC read latency."},
	{"QMC_HIGH_PRIORITY_READS.CH0", "Counts the number of Quickpath Memory Con- troller channel 0 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.CH1", "Counts the number of Quickpath Memory Con- troller channel 1 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.CH2", "Counts the number of Quickpath Memory Con- troller channel 2 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.ANY", "Counts the number of Quickpath Memory Con- troller high priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH0", "Counts the number of Quickpath Memory Con- troller channel 0 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH1", "Counts the number of Quickpath Memory Con- troller channel 1 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH2", "Counts the number of Quickpath Memory Con- troller channel 2 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.ANY", "Counts the number of Quickpath Memory Con- troller critical priority isochronous read requests."},
	{"QMC_WRITES.FULL.CH0", "Counts number of full cache line writes to DRAM channel 0."},
	{"QMC_WRITES.FULL.CH1", "Counts number of full cache line writes to DRAM channel 1."},
	{"QMC_WRITES.FULL.CH2", "Counts number of full cache line writes to DRAM channel 2."},
	{"QMC_WRITES.FULL.ANY", "Counts number of full cache line writes to DRAM."},
	{"QMC_WRITES.PARTIAL.CH0", "Counts number of partial cache line writes to DRAM channel 0."},
	{"QMC_WRITES.PARTIAL.CH1", "Counts number of partial cache line writes to DRAM channel 1."},
	{"QMC_WRITES.PARTIAL.CH2", "Counts number of partial cache line writes to DRAM channel 2."},
	{"QMC_WRITES.PARTIAL.ANY", "Counts number of partial cache line writes to DRAM."},
	{"QMC_CANCEL.CH0", "Counts number of DRAM channel 0 cancel requests."},
	{"QMC_CANCEL.CH1", "Counts number of DRAM channel 1 cancel requests."},
	{"QMC_CANCEL.CH2", "Counts number of DRAM channel 2 cancel requests."},
	{"QMC_CANCEL.ANY", "Counts number of DRAM cancel requests."},
	{"QMC_PRIORITY_UPDATES.CH0", "Counts number of DRAM channel 0 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.CH1", "Counts number of DRAM channel 1 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.CH2", "Counts number of DRAM channel 2 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.ANY", "Counts number of DRAM priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QHL_FRC_ACK_CNFLTS.LOCAL", "Counts number of Force Acknowledge Con- flict messages sent by the Quickpath Home Logic to the local home."},
	{"QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_0", "Counts cycles the Quickpath outbound link 0 HOME virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_0", "Counts cycles the Quickpath outbound link 0 SNOOP virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_0", "Counts cycles the Quickpath outbound link 0 non-data response virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_1", "Counts cycles the Quickpath outbound link 1 HOME virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_1", "Counts cycles the Quickpath outbound link 1 SNOOP virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_1", "Counts cycles the Quickpath outbound link 1 non-data response virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.LINK_0", "Counts cycles the Quickpath outbound link 0 virtual channels are stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.LINK_1", "Counts cycles the Quickpath outbound link 1 virtual channels are stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_0", "Counts cycles the Quickpath outbound link 0 Data ResponSe virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_0", "Counts cycles the Quickpath outbound link 0 Non-Coherent Bypass virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_0", "Counts cycles the Quickpath outbound link 0 Non-Coherent Standard virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_1", "Counts cycles the Quickpath outbound link 1 Data ResponSe virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_1", "Counts cycles the Quickpath outbound link 1 Non-Coherent Bypass virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_1", "Counts cycles the Quickpath outbound link 1 Non-Coherent Standard virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.LINK_0", "Counts cycles the Quickpath outbound link 0 virtual channels are stalled due to lack of VNA and VN0 cred- its. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.LINK_1", "Counts cycles the Quickpath outbound link 1 virtual channels are stalled due to lack of VNA and VN0 cred- its. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_HEADER.BUSY.LINK_0", "Number of cycles that the header buffer in the Quickpath Interface outbound link 0 is busy."},
	{"QPI_TX_HEADER.BUSY.LINK_1", "Number of cycles that the header buffer in the Quickpath Interface outbound link 1 is busy."},
	{"QPI_RX_NO_PPT_CREDIT.STALLS.LINK_0", "Number of cycles that snoop packets incom- ing to the Quickpath Interface link 0 are stalled and not sent to the GQ because the GQ Peer Probe Tracker (PPT) does not have any available entries."},
	{"QPI_RX_NO_PPT_CREDIT.STALLS.LINK_1", "Number of cycles that snoop packets incom- ing to the Quickpath Interface link 1 are stalled and not sent to the GQ because the GQ Peer Probe Tracker (PPT) does not have any available entries."},
	{"DRAM_OPEN.CH0", "Counts number of DRAM Channel 0 open com- mands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_OPEN.CH1", "Counts number of DRAM Channel 1 open com- mands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_OPEN.CH2", "Counts number of DRAM Channel 2 open com- mands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_PAGE_CLOSE.CH0", "DRAM channel 0 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_CLOSE.CH1", "DRAM channel 1 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_CLOSE.CH2", "DRAM channel 2 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH0", "Counts the number of precharges (PRE) that were issued to DRAM channel 0 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH1", "Counts the number of precharges (PRE) that were issued to DRAM channel 1 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH2", "Counts the number of precharges (PRE) that were issued to DRAM channel 2 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_READ_CAS.CH0", "Counts the number of times a read CAS com- mand was issued on DRAM channel 0."},
	{"DRAM_READ_CAS.AUTOPRE_CH0", "Counts the number of times a read CAS com- mand was issued on DRAM channel 0 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_READ_CAS.CH1", "Counts the number of times a read CAS com- mand was issued on DRAM channel 1."},
	{"DRAM_READ_CAS.AUTOPRE_CH1", "Counts the number of times a read CAS com- mand was issued on DRAM channel 1 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_READ_CAS.CH2", "Counts the number of times a read CAS com- mand was issued on DRAM channel 2."},
	{"DRAM_READ_CAS.AUTOPRE_CH2", "Counts the number of times a read CAS com- mand was issued on DRAM channel 2 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH0", "Counts the number of times a write CAS command was issued on DRAM channel 0."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH0", "Counts the number of times a write CAS command was issued on DRAM channel 0 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH1", "Counts the number of times a write CAS command was issued on DRAM channel 1."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH1", "Counts the number of times a write CAS command was issued on DRAM channel 1 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH2", "Counts the number of times a write CAS command was issued on DRAM channel 2."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH2", "Counts the number of times a write CAS command was issued on DRAM channel 2 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_REFRESH.CH0", "Counts number of DRAM channel 0 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_REFRESH.CH1", "Counts number of DRAM channel 1 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_REFRESH.CH2", "Counts number of DRAM channel 2 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_PRE_ALL.CH0", "Counts number of DRAM Channel 0 precharge- all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{"DRAM_PRE_ALL.CH1", "Counts number of DRAM Channel 1 precharge- all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{"DRAM_PRE_ALL.CH2", "Counts number of DRAM Channel 2 precharge- all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{ NULL, NULL } 
};
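
/*
 * Minimal usage sketch (not part of this substrate, guarded out of the
 * build): one way a user program might request one of the native events
 * named in the i7Processor_info table above through the standard PAPI API.
 * The event string "QMC_NORMAL_READS.ANY" is taken from the entries above;
 * whether it is actually available depends on the hardware, the hwpmc(4)
 * support in the running kernel, and the PAPI build.
 */
#if 0
#include <stdio.h>
#include <papi.h>

static void
count_qmc_normal_reads(void)
{
	int evset = PAPI_NULL;
	int code;
	long long value;

	if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
		return;
	if (PAPI_create_eventset(&evset) != PAPI_OK)
		return;
	/* Resolve the native event name listed in i7Processor_info. */
	if (PAPI_event_name_to_code("QMC_NORMAL_READS.ANY", &code) != PAPI_OK)
		return;
	if (PAPI_add_event(evset, code) != PAPI_OK)
		return;
	if (PAPI_start(evset) != PAPI_OK)
		return;
	/* ... run the workload being measured ... */
	if (PAPI_stop(evset, &value) == PAPI_OK)
		printf("QMC_NORMAL_READS.ANY: %lld\n", value);
}
#endif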