/****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/

/* 
* File:    map-westmere.c
* Author:  George Neville-Neil
*          gnn@freebsd.org
*          Harald Servat
*          redcrash@gmail.com
*/

#include "freebsd.h"
#include "papiStdEventDefs.h"
#include "map.h"


 /****************************************************************************
 Westmere SUBSTRATE 
 Westmere SUBSTRATE 
 Westmere SUBSTRATE
 Westmere SUBSTRATE
 Westmere SUBSTRATE
****************************************************************************/

/*
	NativeEvent_Value_Westmere must match WestmereProcessor_info
*/
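
/*
 * Illustrative sketch only -- not part of the original substrate code.
 * The descriptions in WestmereProcessor_info are meant to line up,
 * index for index, with the corresponding native event value table, so
 * a consumer can recover a label for a native event code roughly as
 * shown below.  The helper name and the "description" field name are
 * assumptions for illustration; the real lookup is performed by the
 * generic FreeBSD substrate code, not in this file.
 */
#if 0
static const char *
lookup_event_description( const Native_Event_LabelDescription_t *table,
                          int native_index )
{
	/* Both tables are indexed by the same native event code. */
	return table[native_index].description;
}
#endif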

Native_Event_LabelDescription_t WestmereProcessor_info[] =
{
	{"LOAD_BLOCK.OVERLAP_STORE", "Loads that partially overlap an earlier store"},
	{"SB_DRAIN.ANY", "All Store buffer stall cycles"},
	{"MISALIGN_MEMORY.STORE", "All store referenced with misaligned address"},
	{"STORE_BLOCKS.AT_RET", "Counts number of loads delayed with at-Retirement block code. The following loads need to be executed at retirement and wait for all senior stores on the same thread to be drained: load splitting across 4K boundary (page split), load accessing uncacheable (UC or USWC) memory, load lock, and load with page table in UC or USWC memory region."},
	{"STORE_BLOCKS.L1D_BLOCK", "Cacheable loads delayed with L1D block code"},
	{"PARTIAL_ADDRESS_ALIAS", "Counts false dependency due to partial address aliasing"},
	{"DTLB_LOAD_MISSES.ANY", "Counts all load misses that cause a page walk"},
	{"DTLB_LOAD_MISSES.WALK_COMPLETED", "Counts number of completed page walks due to load miss in the STLB."},
	{"DTLB_LOAD_MISSES.WALK_CYCLES", "Cycles PMH is busy with a page walk due to a load miss in the STLB."},
	{"DTLB_LOAD_MISSES.STLB_HIT", "Number of cache load STLB hits"},
	{"DTLB_LOAD_MISSES.PDE_MISS", "Number of DTLB cache load misses where the low part of the linear tophysical address translation was missed."},
	{"MEM_INST_RETIRED.LOADS", "Counts the number of instructions with an architecturally-visible store retired on the architected path. In conjunction with ld_lat facility"},
	{"MEM_INST_RETIRED.STORES", "Counts the number of instructions with an architecturally-visible store retired on the architected path. In conjunction with ld_lat facility"},
	{"MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD", "Counts the number of instructions exceeding the latency specified with ld_lat facility. In conjunction with ld_lat facility"},
	{"MEM_STORE_RETIRED.DTLB_MISS", "The event counts the number of retired stores that missed the DTLB. The DTLB miss is not counted if the store operation causes a fault. Does not counter prefetches. Counts both primary and secondary misses to the TLB"},
	{"UOPS_ISSUED.ANY", "Counts the number of Uops issued by the Register Allocation Table to the Reservation Station, i.e. the UOPs issued from the front end to the back end."},
	{"UOPS_ISSUED.STALLED_CYCLES", "Counts the number of cycles no Uops issued by the Register Allocation Table to the Reservation Station, i.e. the UOPs issued from the front end to the back end."},
	{"UOPS_ISSUED.FUSED", "Counts the number of fused Uops that were issued from the Register Allocation Table to the Reservation Station."},
	{"MEM_UNCORE_RETIRED.LOCAL_HITM", "Load instructions retired that HIT modified data in sibling core (Precise Event)"},
	{"MEM_UNCORE_RETIRED.LOCAL_DRAM_AND_REMOTE_CACHE_HIT", "Load instructions retired local dram and remote cache HIT data sources (Precise Event)"},
	{"MEM_UNCORE_RETIRED.LOCAL_DRAM", "Load instructions retired with a data source of local DRAM or locally homed remote cache HITM (Precise Event)"},
	{"MEM_UNCORE_RETIRED.REMOTE_DRAM", "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)"},
	{"MEM_UNCORE_RETIRED.UNCACHEABLE", "Load instructions retired I/O (Precise Event)"},
	{"FP_COMP_OPS_EXE.X87", "Counts the number of FP Computational Uops Executed. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a separate FADD instruction."},
	{"FP_COMP_OPS_EXE.MMX", "Counts number of MMX Uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP", "Counts number of SSE and SSE2 FP uops executed."},
	{"FP_COMP_OPS_EXE.SSE2_INTEGER", "Counts number of SSE2 integer uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP_PACKED", "Counts number of SSE FP packed uops executed."},
	{"FP_COMP_OPS_EXE.SSE_FP_SCALAR", "Counts number of SSE FP scalar uops executed."},
	{"FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION", "Counts number of SSE* FP single precision uops executed."},
	{"FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION", "Counts number of SSE* FP double precision uops executed."},
	{"SIMD_INT_128.PACKED_MPY", "Counts number of 128 bit SIMD integer multiply operations."},
	{"SIMD_INT_128.PACKED_SHIFT", "Counts number of 128 bit SIMD integer shift operations."},
	{"SIMD_INT_128.PACK", "Counts number of 128 bit SIMD integer pack operations."},
	{"SIMD_INT_128.UNPACK", "Counts number of 128 bit SIMD integer unpack operations."},
	{"SIMD_INT_128.PACKED_LOGICAL", "Counts number of 128 bit SIMD integer logical operations."},
	{"SIMD_INT_128.PACKED_ARITH", "Counts number of 128 bit SIMD integer arithmetic operations."},
	{"SIMD_INT_128.SHUFFLE_MOVE", "Counts number of 128 bit SIMD integer shuffle and move operations."},
	{"LOAD_DISPATCH.RS", "Counts number of loads dispatched from the Reservation Station that bypass the Memory Order Buffer."},
	{"LOAD_DISPATCH.RS_DELAYED", "Counts the number of delayed RS dispatches at the stage latch. If an RS dispatch can not bypass to LB, it has another chance to dispatch from the one-cycle delayed staging latch before it is written into the LB."},
	{"LOAD_DISPATCH.MOB", "Counts the number of loads dispatched from the Reservation Station to the Memory Order Buffer."},
	{"LOAD_DISPATCH.ANY", "Counts all loads dispatched from the Reservation Station."},
	{"ARITH.CYCLES_DIV_BUSY", "Counts the number of cycles the divider is busy executing divide or square root operations. The divide can be integer, X87 or Streaming SIMD Extensions (SSE). The square root operation can be either X87 or SSE. Set 'edge =1, invert=1, cmask=1' to count the number of divides. Count may be incorrect When SMT is on."},
	{"ARITH.MUL", "Counts the number of multiply operations executed. This includes integer as well as floating point multiply operations but excludes DPPS mul and MPSAD. Count may be incorrect When SMT is on."},
	{"INST_QUEUE_WRITES", "Counts the number of instructions written into the instruction queue every cycle."},
	{"INST_DECODED.DEC0", "Counts number of instructions that require decoder 0 to be decoded. Usually, this means that the instruction maps to more than 1 uop"},
	{"TWO_UOP_INSTS_DECODED", "An instruction that generates two uops was decoded"},
	{"INST_QUEUE_WRITE_CYCLES", "This event counts the number of cycles during which instructions are written to the instruction queue. Dividing this counter by the number of instructions written to the instruction queue (INST_QUEUE_WRITES) yields the average number of instructions decoded each cycle. If this number is less than four and the pipe stalls, this indicates that the decoder is failing to decode enough instructions per cycle to sustain the 4-wide pipeline. If SSE* instructions that are 6 bytes or longer arrive one after another, then front end throughput may limit execution speed. "},
	{"LSD_OVERFLOW", "Number of loops that can not stream from the instruction queue."},
	{"L2_RQSTS.LD_HIT", "Counts number of loads that hit the L2 cache. L2 loads include both L1D demand misses as well as L1D prefetches. L2 loads can be rejected for various reasons. Only non rejected loads are counted."},
	{"L2_RQSTS.LD_MISS", "Counts the number of loads that miss the L2 cache. L2 loads include both L1D demand misses as well as L1D prefetches."},
	{"L2_RQSTS.LOADS", "Counts all L2 load requests. L2 loads include both L1D demand misses as well as L1D prefetches."},
	{"L2_RQSTS.RFO_HIT", "Counts the number of store RFO requests that hit the L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches. Count includes WC memory requests, where the data is not fetched but the permission to write the line is required."},
	{"L2_RQSTS.RFO_MISS", "Counts the number of store RFO requests that miss the L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches."},
	{"L2_RQSTS.RFOS", "Counts all L2 store RFO requests. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches."},
	{"L2_RQSTS.IFETCH_HIT", "Counts number of instruction fetches that hit the L2 cache. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.IFETCH_MISS", "Counts number of instruction fetches that miss the L2 cache. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.IFETCHES", "Counts all instruction fetches. L2 instruction fetches include both L1I demand misses as well as L1I instruction prefetches."},
	{"L2_RQSTS.PREFETCH_HIT", "Counts L2 prefetch hits for both code and data."},
	{"L2_RQSTS.PREFETCH_MISS", "Counts L2 prefetch misses for both code and data."},
	{"L2_RQSTS.PREFETCHES", "Counts all L2 prefetches for both code and data."},
	{"L2_RQSTS.MISS", "Counts all L2 misses for both code and data."},
	{"L2_RQSTS.REFERENCES", "Counts all L2 requests for both code and data."},
	{"L2_DATA_RQSTS.DEMAND.I_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.S_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the S (shared) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.E_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the E (exclusive) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.M_STATE", "Counts number of L2 data demand loads where the cache line to be loaded is in the M (modified) state. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.DEMAND.MESI", "Counts all L2 data demand requests. L2 demand loads are both L1D demand misses and L1D prefetches."},
	{"L2_DATA_RQSTS.PREFETCH.I_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss."},
	{"L2_DATA_RQSTS.PREFETCH.S_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the S (shared) state. A prefetch RFO will miss on an S state line, while a prefetch read will hit on an S state line."},
	{"L2_DATA_RQSTS.PREFETCH.E_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the E (exclusive) state."},
	{"L2_DATA_RQSTS.PREFETCH.M_STATE", "Counts number of L2 prefetch data loads where the cache line to be loaded is in the M (modified) state."},
	{"L2_DATA_RQSTS.PREFETCH.MESI", "Counts all L2 prefetch requests."},
	{"L2_DATA_RQSTS.ANY", "Counts all L2 data requests."},
	{"L2_WRITE.RFO.I_STATE", "Counts number of L2 demand store RFO requests where the cache line to be loaded is in the I (invalid) state, i.e, a cache miss. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.S_STATE", "Counts number of L2 store RFO requests where the cache line to be loaded is in the S (shared) state. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.M_STATE", "Counts number of L2 store RFO requests where the cache line to be loaded is in the M (modified) state. The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.RFO.HIT", "Counts number of L2 store RFO requests where the cache line to be loaded is in either the S, E or M states. The L1D prefetcher does not issue a RFO prefetch."},
	{"L2_WRITE.RFO.MESI", "Counts all L2 store RFO requests.The L1D prefetcher does not issue a RFO prefetch. This is a demand RFO request."},
	{"L2_WRITE.LOCK.I_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the I (invalid) state, i.e. a cache miss."},
	{"L2_WRITE.LOCK.S_STATE", "Counts number of L2 lock RFO requests where the cache line to be loaded is in the S (shared) state."},
	{"L2_WRITE.LOCK.E_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the E (exclusive) state."},
	{"L2_WRITE.LOCK.M_STATE", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in the M (modified) state."},
	{"L2_WRITE.LOCK.HIT", "Counts number of L2 demand lock RFO requests where the cache line to be loaded is in either the S, E, or M state."},
	{"L2_WRITE.LOCK.MESI", "Counts all L2 demand lock RFO requests."},
	{"L1D_WB_L2.I_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the I (invalid) state, i.e. a cache miss."},
	{"L1D_WB_L2.S_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the S state."},
	{"L1D_WB_L2.E_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the E (exclusive) state."},
	{"L1D_WB_L2.M_STATE", "Counts number of L1 writebacks to the L2 where the cache line to be written is in the M (modified) state."},
	{"L1D_WB_L2.MESI", "Counts all L1 writebacks to the L2."},
	{"L3_LAT_CACHE.REFERENCE", "Counts uncore Last Level Cache references. Because cache hierarchy, cache sizes and other implementation-specific characteristics; value comparison to estimate performance differences is not recommended. See Table A-1."},
	{"L3_LAT_CACHE.MISS", "Counts uncore Last Level Cache misses. Because cache hierarchy, cache sizes and other implementation-specific characteristics; value comparison to estimate performance differences is not recommended. See Table A-1."},
	{"CPU_CLK_UNHALTED.THREAD_P", "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. See Table A-1."},
	{"CPU_CLK_UNHALTED.REF_P", "Increments at the frequency of TSC when not halted. See Table A-1."},
	{"DTLB_MISSES.ANY", "Counts the number of misses in the STLB which causes a page walk."},
	{"DTLB_MISSES.WALK_COMPLETED", "Counts number of misses in the STLB which resulted in a completed page walk."},
	{"DTLB_MISSES.WALK_CYCLES", "Counts cycles of page walk due to misses in the STLB."},
	{"DTLB_MISSES.STLB_HIT", "Counts the number of DTLB first level misses that hit in the second level TLB. This event is only relevant if the core contains multiple DTLB levels."},
	{"DTLB_MISSES.LARGE_WALK_COMPLETED", "Counts number of completed large page walks due to misses in the STLB."},
	{"LOAD_HIT_PRE", "Counts load operations sent to the L1 data cache while a previous SSE prefetch instruction to the same cache line has started prefetching but has not yet finished."},
	{"L1D_PREFETCH.REQUESTS", "Counts number of hardware prefetch requests dispatched out of the prefetch FIFO."},
	{"L1D_PREFETCH.MISS", "Counts number of hardware prefetch requests that miss the L1D. There are two prefetchers in the L1D. A streamer, which predicts lines sequentially after this one should be fetched, and the IP prefetcher that remembers access patterns for the current instruction. The streamer prefetcher stops on an L1D hit, while the IP prefetcher does not."},
	{"L1D_PREFETCH.TRIGGERS", "Counts number of prefetch requests triggered by the Finite State Machine and pushed into the prefetch FIFO. Some of the prefetch requests are dropped due to overwrites or competition between the IP index prefetcher and streamer prefetcher. The prefetch FIFO contains 4 entries."},
	{"EPT.WALK_CYCLES", "Counts Extended Page walk cycles."},
	{"L1D.REPL", "Counts the number of lines brought into the L1 data cache.Counter 0, 1 only."},
	{"L1D.M_REPL", "Counts the number of modified lines brought into the L1 data cache. Counter 0, 1 only."},
	{"L1D.M_EVICT", "Counts the number of modified lines evicted from the L1 data cache due to replacement. Counter 0, 1 only."},
	{"L1D.M_SNOOP_EVICT", "Counts the number of modified lines evicted from the L1 data cache due to snoop HITM intervention. Counter 0, 1 only."},
	{"L1D_CACHE_PREFETCH_LOCK_FB_HIT", "Counts the number of cacheable load lock speculated instructions accepted into the fill buffer."},
	{"L1D_CACHE_LOCK_FB_HIT", "Counts the number of cacheable load lock speculated or retired instructions accepted into the fill buffer."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA", "Counts weighted cycles of offcore demand data read requests. Does not include L2 prefetch requests. Counter 0."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE", "Counts weighted cycles of offcore demand code read requests. Does not include L2 prefetch requests. Counter 0."},
	{"OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO", "Counts weighted cycles of offcore demand RFO requests. Does not include L2 prefetch requests. Counter 0."},
	{"OFFCORE_REQUESTS_OUTSTANDING.ANY.READ", "Counts weighted cycles of offcore read requests of any kind. Include L2 prefetch requests. Counter 0."},
	{"CACHE_LOCK_CYCLES.L1D_L2", "Cycle count during which the L1D and L2 are locked. A lock is asserted when there is a locked memory access, due to uncacheable memory, a locked operation that spans two cache lines, or a page walk from an uncacheable page table. Counter 0, 1 only. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such accesses."},
	{"CACHE_LOCK_CYCLES.L1D", "Counts the number of cycles that cacheline in the L1 data cache unit is locked. Counter 0, 1 only."},
	{"IO_TRANSACTIONS", "Counts the number of completed I/O transactions."},
	{"L1I.HITS", "Counts all instruction fetches that hit the L1 instruction cache."},
	{"L1I.MISSES", "Counts all instruction fetches that miss the L1I cache. This includes instruction cache misses, streaming buffer misses, victim cache misses and uncacheable fetches. An instruction fetch miss is counted only once and not once for every cycle it is outstanding."},
	{"L1I.READS", "Counts all instruction fetches, including uncacheable fetches that bypass the L1I."},
	{"L1I.CYCLES_STALLED", "Cycle counts for which an instruction fetch stalls due to a L1I cache miss, ITLB miss or ITLB fault."},
	{"LARGE_ITLB.HIT", "Counts number of large ITLB hits."},
	{"ITLB_MISSES.ANY", "Counts the number of misses in all levels of the ITLB which causes a page walk."},
	{"ITLB_MISSES.WALK_COMPLETED", "Counts number of misses in all levels of the ITLB which resulted in a completed page walk."},
	{"ITLB_MISSES.WALK_CYCLES", "Counts ITLB miss page walk cycles."},
	{"ITLB_MISSES.LARGE_WALK_COMPLETED", "Counts number of completed large page walks due to misses in the STLB."},
	{"ILD_STALL.LCP", "Cycles Instruction Length Decoder stalls due to length changing prefixes: 66, 67 or REX.W (for EM64T) instructions which change the length of the decoded instruction."},
	{"ILD_STALL.MRU", "Instruction Length Decoder stall cycles due to Brand Prediction Unit (PBU). Most Recently Used (MRU) bypass."},
	{"ILD_STALL.IQ_FULL", "Stall cycles due to a full instruction queue."},
	{"ILD_STALL.REGEN", "Counts the number of regen stalls."},
	{"ILD_STALL.ANY", "Counts any cycles the Instruction Length Decoder is stalled."},
	{"BR_INST_EXEC.COND", "Counts the number of conditional near branch instructions executed, but not necessarily retired."},
	{"BR_INST_EXEC.DIRECT", "Counts all unconditional near branch instructions excluding calls and indirect branches."},
	{"BR_INST_EXEC.INDIRECT_NON_CALL", "Counts the number of executed indirect near branch instructions that are not calls."},
	{"BR_INST_EXEC.NON_CALLS", "Counts all non call near branch instructions executed, but not necessarily retired."},
	{"BR_INST_EXEC.RETURN_NEAR", "Counts indirect near branches that have a return mnemonic."},
	{"BR_INST_EXEC.DIRECT_NEAR_CALL", "Counts unconditional near call branch instructions, excluding non call branch, executed."},
	{"BR_INST_EXEC.INDIRECT_NEAR_CALL", "Counts indirect near calls, including both register and memory indirect, executed."},
	{"BR_INST_EXEC.NEAR_CALLS", "Counts all near call branches executed, but not necessarily retired."},
	{"BR_INST_EXEC.TAKEN", "Counts taken near branches executed, but not necessarily retired."},
	{"BR_INST_EXEC.ANY", "Counts all near executed branches (not necessarily retired). This includes only instructions and not micro-op branches. Frequent branching is not necessarily a major performance issue. However frequent branch mispredictions may be a problem."},
	{"BR_MISP_EXEC.COND", "Counts the number of mispredicted conditional near branch instructions executed, but not necessarily retired."},
	{"BR_MISP_EXEC.DIRECT", "Counts mispredicted macro unconditional near branch instructions, excluding calls and indirect branches (should always be 0)."},
	{"BR_MISP_EXEC.INDIRECT_NON_CALL", "Counts the number of executed mispredicted indirect near branch instructions that are not calls."},
	{"BR_MISP_EXEC.NON_CALLS", "Counts mispredicted non call near branches executed, but not necessarily retired."},
	{"BR_MISP_EXEC.RETURN_NEAR", "Counts mispredicted indirect branches that have a rear return mnemonic."},
	{"BR_MISP_EXEC.DIRECT_NEAR_CALL", "Counts mispredicted non-indirect near calls executed, (should always be 0)."},
	{"BR_MISP_EXEC.INDIRECT_NEAR_CALL", "Counts mispredicted indirect near calls executed, including both register and memory indirect."},
	{"BR_MISP_EXEC.NEAR_CALLS", "Counts all mispredicted near call branches executed, but not necessarily retired."},
	{"BR_MISP_EXEC.TAKEN", "Counts executed mispredicted near branches that are taken, but not necessarily retired."},
	{"BR_MISP_EXEC.ANY", "Counts the number of mispredicted near branch instructions that were executed, but not necessarily retired."},
	{"RESOURCE_STALLS.ANY", "Counts the number of Allocator resource related stalls. Includes register renaming buffer entries, memory buffer entries. In addition to resource related stalls, this event counts some other events. Includes stalls arising during branch misprediction recovery, such as if retirement of the mispredicted branch is delayed and stalls arising while store buffer is draining from synchronizing operations. Does not include stalls due to SuperQ (off core) queue full, too many cache misses, etc."},
	{"RESOURCE_STALLS.LOAD", "Counts the cycles of stall due to lack of load buffer for load operation."},
	{"RESOURCE_STALLS.RS_FULL", "This event counts the number of cycles when the number of instructions in the pipeline waiting for execution reaches the limit the processor can handle. A high count of this event indicates that there are long latency operations in the pipe (possibly load and store operations that miss the L2 cache, or instructions dependent upon instructions further down the pipeline that have yet to retire. When RS is full, new instructions can not enter the reservation station and start execution."},
	{"RESOURCE_STALLS.STORE", "This event counts the number of cycles that a resource related stall will occur due to the number of store instructions reaching the limit of the pipeline, (i.e. all store buffers are used). The stall ends when a store instruction commits its data to the cache or memory."},
	{"RESOURCE_STALLS.ROB_FULL", "Counts the cycles of stall due to re- order buffer full."},
	{"RESOURCE_STALLS.FPCW", "Counts the number of cycles while execution was stalled due to writing the floating-point unit (FPU) control word."},
	{"RESOURCE_STALLS.MXCSR", "Stalls due to the MXCSR register rename occurring to close to a previous MXCSR rename. The MXCSR provides control and status for the MMX registers."},
	{"RESOURCE_STALLS.OTHER", "Counts the number of cycles while execution was stalled due to other resource issues."},
	{"MACRO_INSTS.FUSIONS_DECODED", "Counts the number of instructions decoded that are macro-fused but not necessarily executed or retired."},
	{"BACLEAR_FORCE_IQ", "Counts number of times a BACLEAR was forced by the Instruction Queue. The IQ is also responsible for providing conditional branch prediction direction based on a static scheme and dynamic data provided by the L2 Branch Prediction Unit. If the conditional branch target is not found in the Target Array and the IQ predicts that the branch is taken, then the IQ will force the Branch Address Calculator to issue a BACLEAR. Each BACLEAR asserted by the BAC generates approximately an 8 cycle bubble in the instruction fetch pipeline."},
	{"LSD.UOPS", "Counts the number of micro-ops delivered by loop stream detector. Use cmask=1 and invert to count cycles."},
	{"ITLB_FLUSH", "Counts the number of ITLB flushes"},
	{"OFFCORE_REQUESTS.DEMAND.READ_DATA", "Counts number of offcore demand data read requests. Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.DEMAND.READ_CODE", "Counts number of offcore demand code read requests. Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.DEMAND.RFO", "Counts number of offcore demand RFO requests. Does not count L2 prefetch requests."},
	{"OFFCORE_REQUESTS.ANY.READ", "Counts number of offcore read requests. Includes L2 prefetch requests."},
	{"OFFCORE_REQUESTS.ANY.RFO", "Counts number of offcore RFO requests. Includes L2 prefetch requests."},
	{"OFFCORE_REQUESTS.L1D_WRITEBACK", "Counts number of L1D writebacks to the uncore."},
	{"OFFCORE_REQUESTS.ANY", "Counts all offcore requests."},
	{"UOPS_EXECUTED.PORT0", "Counts number of Uops executed that were issued on port 0. Port 0 handles integer arithmetic, SIMD and FP add Uops."},
	{"UOPS_EXECUTED.PORT1", "Counts number of Uops executed that were issued on port 1. Port 1 handles integer arithmetic, SIMD, integer shift, FP multiply and FP divide Uops."},
	{"UOPS_EXECUTED.PORT2_CORE", "Counts number of Uops executed that were issued on port 2. Port 2 handles the load Uops. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT3_CORE", "Counts number of Uops executed that were issued on port 3. Port 3 handles store Uops. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT4_CORE", "Counts number of Uops executed that where issued on port 4. Port 4 handles the value to be stored for the store Uops issued on port 3. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5", "Counts number of cycles there are one or more uops being executed and were issued on ports 0-4. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT5", "Counts number of Uops executed that where issued on port 5."},
	{"UOPS_EXECUTED.CORE_ACTIVE_CYCLES", "Counts number of cycles there are one or more uops being executed on any ports. This is a core count only and can not be collected per thread."},
	{"UOPS_EXECUTED.PORT015", "Counts number of Uops executed that where issued on port 0, 1, or 5.  Use cmask=1, invert=1 to count stall cycles."},
	{"UOPS_EXECUTED.PORT234", "Counts number of Uops executed that where issued on port 2, 3, or 4."},
	{"OFFCORE_REQUESTS_SQ_FULL", "Counts number of cycles the SQ is full to handle off-core requests."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.DATA", "Counts weighted cycles of snoopq requests for data. Counter 0 only Use cmask=1 to count cycles not empty."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE", "Counts weighted cycles of snoopq invalidate requests. Counter 0 only Use cmask=1 to count cycles not empty."},
	{"SNOOPQ_REQUESTS_OUTSTANDING.CODE", "Counts weighted cycles of snoopq requests for code. Counter 0 only. Use cmask=1 to count cycles not empty."},
	{"SNOOPQ_REQUESTS.CODE", "Counts the number of snoop code requests."},
	{"SNOOPQ_REQUESTS.DATA", "Counts the number of snoop data requests."},
	{"SNOOPQ_REQUESTS.INVALIDATE", "Counts the number of snoop invalidate requests."},
	{"OFF_CORE_RESPONSE_0", "see Section 30.6.1.3, Off-core Response Performance Monitoring in the Processor Core. Requires programming MSR 01A6H."},
	{"SNOOP_RESPONSE.HIT", "Counts HIT snoop response sent by this thread in response to a snoop request."},
	{"SNOOP_RESPONSE.HITE", "Counts HIT E snoop response sent by this thread in response to a snoop request."},
	{"SNOOP_RESPONSE.HITM", "Counts HIT M snoop response sent by this thread in response to a snoop request."},
	{"OFF_CORE_RESPONSE_1", "See Section 30.6.1.3, Off-core Response Performance Monitoring in the Processor Core. Use MSR 01A7H."},
	{"INST_RETIRED.ANY_P", "See Table A-1 Notes: INST_RETIRED.ANY is counted by a designated fixed counter. INST_RETIRED.ANY_P is counted by a programmable counter and is an architectural performance event. Event is supported if CPUID.A.EBX[1] = 0. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions."},
	{"INST_RETIRED.X87", "Counts the number of floating point computational operations retired floating point computational operations executed by the assist handler and sub-operations of complex floating point instructions like transcendental instructions."},
	{"INST_RETIRED.MMX", "Counts the number of retired: MMX instructions."},
	{"UOPS_RETIRED.ANY", "Counts the number of micro-ops retired, (macro-fused=1, micro- fused=2, others=1; maximum count of 8 per cycle). Most instructions are composed of one or two micro-ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. Use cmask=1 and invert to count active cycles or stalled cycles"},
	{"UOPS_RETIRED.RETIRE_SLOTS", "Counts the number of retirement slots used each cycle"},
	{"UOPS_RETIRED.MACRO_FUSED", "Counts number of macro-fused uops retired."},
	{"MACHINE_CLEARS.CYCLES", "Counts the cycles machine clear is asserted."},
	{"MACHINE_CLEARS.MEM_ORDER", "Counts the number of machine clears due to memory order conflicts."},
	{"MACHINE_CLEARS.SMC", "Counts the number of times that a program writes to a code section. Self-modifying code causes a sever penalty in all Intel 64 and IA-32 processors. The modified cache line is written back to the L2 and L3caches."},
	{"BR_INST_RETIRED.ANY_P", "See Table A-1"},
	{"BR_INST_RETIRED.CONDITIONAL", "Counts the number of conditional branch instructions retired."},
	{"BR_INST_RETIRED.NEAR_CALL", "Counts the number of direct & indirect near unconditional calls retired."},
	{"BR_INST_RETIRED.ALL_BRANCHES", "Counts the number of branch instructions retired."},
	{"BR_MISP_RETIRED.ANY_P", "See Table A-1."},
	{"BR_MISP_RETIRED.CONDITIONAL", "Counts mispredicted conditional retired calls."},
	{"BR_MISP_RETIRED.NEAR_CALL", "Counts mispredicted direct & indirect near unconditional retired calls."},
	{"BR_MISP_RETIRED.ALL_BRANCHES", "Counts all mispredicted retired calls."},
	{"SSEX_UOPS_RETIRED.PACKED_SINGLE", "Counts SIMD packed single-precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.SCALAR_SINGLE", "Counts SIMD calar single-precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.PACKED_DOUBLE", "Counts SIMD packed double- precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.SCALAR_DOUBLE", "Counts SIMD scalar double-precision floating point Uops retired."},
	{"SSEX_UOPS_RETIRED.VECTOR_INTEGER", "Counts 128-bit SIMD vector integer Uops retired."},
	{"ITLB_MISS_RETIRED", "Counts the number of retired instructions that missed the ITLB when the instruction was fetched."},
	{"MEM_LOAD_RETIRED.L1D_HIT", "Counts number of retired loads that hit the L1 data cache."},
	{"MEM_LOAD_RETIRED.L2_HIT", "Counts number of retired loads that hit the L2 data cache."},
	{"MEM_LOAD_RETIRED.L3_UNSHARED_HIT", "Counts number of retired loads that hit their own, unshared lines in the L3 cache."},
	{"MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM", "Counts number of retired loads that hit in a sibling core's L2 (on die core). Since the L3 is inclusive of all cores on the package, this is an L3 hit. This counts both clean or modified hits."},
	{"MEM_LOAD_RETIRED.L3_MISS", "Counts number of retired loads that miss the L3 cache. The load was satisfied by a remote socket, local memory or an IOH."},
	{"MEM_LOAD_RETIRED.HIT_LFB", "Counts number of retired loads that miss the L1D and the address is located in an allocated line fill buffer and will soon be committed to cache. This is counting secondary L1D misses."},
	{"MEM_LOAD_RETIRED.DTLB_MISS", "Counts the number of retired loads that missed the DTLB. The DTLB miss is not counted if the load operation causes a fault. This event counts loads from cacheable memory only. The event does not count loads by software prefetches. Counts both primary and secondary misses to the TLB."},
	{"FP_MMX_TRANS.TO_FP", "Counts the first floating-point instruction following any MMX instruction. You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"FP_MMX_TRANS.TO_MMX", "Counts the first MMX instruction following a floating-point instruction. You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"FP_MMX_TRANS.ANY", "Counts all transitions from floating point to MMX instructions and from MMX instructions to floating point instructions. You can use this event to estimate the penalties for the transitions between floating-point and MMX technology states."},
	{"MACRO_INSTS.DECODED", "Counts the number of instructions decoded, (but not necessarily executed or retired)."},
	{"UOPS_DECODED.STALL_CYCLES", "Counts the cycles of decoder stalls."},
	{"UOPS_DECODED.MS", "Counts the number of Uops decoded by the Microcode Sequencer, MS. The MS delivers uops when the instruction is more than 4 uops long or a microcode assist is occurring."},
	{"UOPS_DECODED.ESP_FOLDING", "Counts number of stack pointer (ESP) instructions decoded: push, pop, call, ret, etc. ESP instructions do not generate a Uop to increment or decrement ESP. Instead, they update an ESP_Offset register that keeps track of the delta to the current value of the ESP register."},
	{"UOPS_DECODED.ESP_SYNC", "Counts number of stack pointer (ESP) sync operations where an ESP instruction is corrected by adding the ESP offset register to the current value of the ESP register."},
	{"RAT_STALLS.FLAGS", "Counts the number of cycles during which execution stalled due to several reasons, one of which is a partial flag register stall. A partial register stall may occur when two conditions are met: 1) an instruction modifies some, but not all, of the flags in the flag register and 2) the next instruction, which depends on flags, depends on flags that were not modified by this instruction."},
	{"RAT_STALLS.REGISTERS", "This event counts the number of cycles instruction execution latency became longer than the defined latency because the instruction used a register that was partially written by previous instruction."},
	{"RAT_STALLS.ROB_READ_PORT", "Counts the number of cycles when ROB read port stalls occurred, which did not allow new micro-ops to enter the out-of-order pipeline. Note that, at this stage in the pipeline, additional stalls may occur at the same cycle and prevent the stalled micro-ops from entering the pipe. In such a case, micro-ops retry entering the execution pipe in the next cycle and the ROB-read port stall is counted again."},
	{"RAT_STALLS.SCOREBOARD", "Counts the cycles where we stall due to microarchitecturally required serialization. Microcode scoreboarding stalls."},
	{"RAT_STALLS.ANY", "Counts all Register Allocation Table stall cycles due to: Cycles when ROB read port stalls occurred, which did not allow new micro-ops to enter the execution pipe. Cycles when partial register stalls occurred Cycles when flag stalls occurred Cycles floating-point unit (FPU) status word stalls occurred. To count each of these conditions separately use the events: RAT_STALLS.ROB_READ_PORT, RAT_STALLS.PARTIAL, RAT_STALLS.FLAGS, and RAT_STALLS.FPSW."},
	{"SEG_RENAME_STALLS", "Counts the number of stall cycles due to the lack of renaming resources for the ES, DS, FS, and GS segment registers. If a segment is renamed but not retired and a second update to the same segment occurs, a stall occurs in the front- end of the pipeline until the renamed segment retires."},
	{"ES_REG_RENAMES", "Counts the number of times the ES segment register is renamed."},
	{"UOP_UNFUSION", "Counts unfusion events due to floating point exception to a fused uop."},
	{"BR_INST_DECODED", "Counts the number of branch instructions decoded."},
	{"BPU_MISSED_CALL_RET", "Counts number of times the Branch Prediction Unit missed predicting a call or return branch."},
	{"BACLEAR.CLEAR", "Counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. This can occur if the code has many branches such that they cannot be consumed by the BPU. Each BACLEAR asserted by the BAC generates approximately an 8 cycle bubble in the instruction fetch pipeline. The effect on total execution time depends on the surrounding code."},
	{"BACLEAR.BAD_TARGET", "Counts number of Branch Address Calculator clears (BACLEAR) asserted due to conditional branch instructions in which there was a target hit but the direction was wrong. Each BACLEAR asserted by the BAC generates approximately an 8 cycle bubble in the instruction fetch pipeline."},
	{"BPU_CLEARS.EARLY", "Counts early (normal) Branch Prediction Unit clears: BPU predicted a taken branch after incorrectly assuming that it was not taken. The BPU clear leads to 2 cycle bubble in the Front End."},
	{"BPU_CLEARS.LATE", "Counts late Branch Prediction Unit clears due to Most Recently Used conflicts. The PBU clear leads to a 3 cycle bubble in the Front End."},
	{"THREAD_ACTIVE", "Counts cycles threads are active."},
	{"L2_TRANSACTIONS.LOAD", "Counts L2 load operations due to HW prefetch or demand loads."},
	{"L2_TRANSACTIONS.RFO", "Counts L2 RFO operations due to HW prefetch or demand RFOs."},
	{"L2_TRANSACTIONS.IFETCH", "Counts L2 instruction fetch operations due to HW prefetch or demand ifetch."},
	{"L2_TRANSACTIONS.PREFETCH", "Counts L2 prefetch operations."},
	{"L2_TRANSACTIONS.L1D_WB", "Counts L1D writeback operations to the L2."},
	{"L2_TRANSACTIONS.FILL", "Counts L2 cache line fill operations due to load, RFO, L1D writeback or prefetch."},
	{"L2_TRANSACTIONS.WB", "Counts L2 writeback operations to the L3."},
	{"L2_TRANSACTIONS.ANY", "Counts all L2 cache operations."},
	{"L2_LINES_IN.S_STATE", "Counts the number of cache lines allocated in the L2 cache in the S (shared) state."},
	{"L2_LINES_IN.E_STATE", "Counts the number of cache lines allocated in the L2 cache in the E (exclusive) state."},
	{"L2_LINES_IN.ANY", "Counts the number of cache lines allocated in the L2 cache."},
	{"L2_LINES_OUT.DEMAND_CLEAN", "Counts L2 clean cache lines evicted by a demand request."},
	{"L2_LINES_OUT.DEMAND_DIRTY", "Counts L2 dirty (modified) cache lines evicted by a demand request."},
	{"L2_LINES_OUT.PREFETCH_CLEAN", "Counts L2 clean cache line evicted by a prefetch request."},
	{"L2_LINES_OUT.PREFETCH_DIRTY", "Counts L2 modified cache line evicted by a prefetch request."},
	{"L2_LINES_OUT.ANY", "Counts all L2 cache lines evicted for any reason."},
	{"SQ_MISC.LRU_HINTS", "Counts number of Super Queue LRU hints sent to L3."},
	{"SQ_MISC.SPLIT_LOCK", "Counts the number of SQ lock splits across a cache line."},
	{"SQ_FULL_STALL_CYCLES", "Counts cycles the Super Queue is full. Neither of the threads on this core will be able to access the uncore."},
	{"FP_ASSIST.ALL", "Counts the number of floating point operations executed that required micro-code assist intervention. Assists are required in the following cases: SSE instructions, (Denormal input when the DAZ flag is off or Underflow result when the FTZ flag is off): x87 instructions, (NaN or denormal are loaded to a register or used as input from memory, Division by 0 or Underflow output)."},
	{"FP_ASSIST.OUTPUT", "Counts number of floating point micro-code assist when the output value (destination register) is invalid."},
	{"FP_ASSIST.INPUT", "Counts number of floating point micro-code assist when the input value (one of the source operands to an FP instruction) is invalid."},
	{"SIMD_INT_64.PACKED_MPY", "Counts number of SID integer 64 bit packed multiply operations."},
	{"SIMD_INT_64.PACKED_SHIFT", "Counts number of SID integer 64 bit packed shift operations."},
	{"SIMD_INT_64.PACK", "Counts number of SID integer 64 bit pack operations."},
	{"SIMD_INT_64.UNPACK", "Counts number of SID integer 64 bit unpack operations."},
	{"SIMD_INT_64.PACKED_LOGICAL", "Counts number of SID integer 64 bit logical operations."},
	{"SIMD_INT_64.PACKED_ARITH", "Counts number of SID integer 64 bit arithmetic operations."},
	{"SIMD_INT_64.SHUFFLE_MOVE", "Counts number of SID integer 64 bit shift or move operations."},
	{"INSTR_RETIRED_ANY", ""},
	{"CPU_CLK_UNHALTED_CORE", ""},
	{"CPU_CLK_UNHALTED_REF", ""},
	{"GQ_CYCLES_FULL.READ_TRACKER", "Uncore cycles Global Queue read tracker is full."},
	{"GQ_CYCLES_FULL.WRITE_TRACKER", "Uncore cycles Global Queue write tracker is full."},
	{"GQ_CYCLES_FULL.PEER_PROBE_TRACKER", "Uncore cycles Global Queue peer probe tracker is full. The peer probe tracker queue tracks snoops from the IOH and remote sockets."},
	{"GQ_CYCLES_NOT_EMPTY.READ_TRACKER", "Uncore cycles were Global Queue read tracker has at least one valid entry."},
	{"GQ_CYCLES_NOT_EMPTY.WRITE_TRACKER", "Uncore cycles were Global Queue write tracker has at least one valid entry."},
	{"GQ_CYCLES_NOT_EMPTY.PEER_PROBE_TRACKER", "Uncore cycles were Global Queue peer probe tracker has at least one valid entry. The peer probe tracker queue tracks IOH and remote socket snoops."},
	{"GQ_OCCUPANCY.READ_TRACKER", "Increments the number of queue entries (code read, data read, and RFOs) in the tread tracker. The GQ read tracker allocate to deallocate occupancy count is divided by the count to obtain the average read tracker latency."},
	{"GQ_ALLOC.READ_TRACKER", "Counts the number of tread tracker allocate to deallocate entries. The GQ read tracker allocate to deallocate occupancy count is divided by the count to obtain the average read tracker latency."},
	{"GQ_ALLOC.RT_L3_MISS", "Counts the number GQ read tracker entries for which a full cache line read has missed the L3. The GQ read tracker L3 miss to fill occupancy count is divided by this count to obtain the average cache line read L3 miss latency. The latency represents the time after which the L3 has determined that the cache line has missed. The time between a GQ read tracker allocation and the L3 determining that the cache line has missed is the average L3 hit latency. The total L3 cache line read miss latency is the hit latency + L3 misslatency."},
	{"GQ_ALLOC.RT_TO_L3_RESP", "Counts the number of GQ read tracker entries that are allocated in the read tracker queue that hit or miss the L3. The GQ read tracker L3 hit occupancy count is divided by this count to obtain the average L3 hit latency."},
	{"GQ_ALLOC.RT_TO_RTID_ACQUIRED", "Counts the number of GQ read tracker entries that are allocated in the read tracker, have missed in the L3 and have not acquired a Request Transaction ID. The GQ read tracker L3 miss to RTID acquired occupancy count is divided by this count to obtain the average latency for a read L3 miss to acquire an RTID."},
	{"GQ_ALLOC.WT_TO_RTID_ACQUIRED", "Counts the number of GQ write tracker entries that are allocated in the write tracker, have missed in the L3 and have not acquired a Request Transaction ID. The GQ write tracker L3 miss to RTID occupancy count is divided by this count to obtain the average latency for a write L3 miss to acquire an RTID."},
	{"GQ_ALLOC.WRITE_TRACKER", "Counts the number of GQ write tracker entries that are allocated in the write tracker queue that miss the L3. The GQ write tracker occupancy count is divided by the this count to obtain the average L3 write miss latency."},
	{"GQ_ALLOC.PEER_PROBE_TRACKER", "Counts the number of GQ peer probe tracker (snoop) entries that are allocated in the peer probe tracker queue that miss the L3. The GQ peer probe occupancy count is divided by this count to obtain the average L3 peer probe miss latency."},
	{"GQ_DATA.FROM_QPI", "Cycles Global Queue Quickpath Interface input data port is busy importing data from the Quickpath Interface. Each cycle the input port can transfer 8 or 16 bytes of data."},
	{"GQ_DATA.FROM_QMC", "Cycles Global Queue Quickpath Memory Interface input data port is busy importing data from the Quickpath Memory Interface. Each cycle the input port can transfer 8 or 16 bytes of data."},
	{"GQ_DATA.FROM_L3", "Cycles GQ L3 input data port is busy importing data from the Last Level Cache. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.FROM_CORES_02", "Cycles GQ Core 0 and 2 input data port is busy importing data from processor cores 0 and 2. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.FROM_CORES_13", "Cycles GQ Core 1 and 3 input data port is busy importing data from processor cores 1 and 3. Each cycle the input port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_QPI_QMC", "Cycles GQ QPI and QMC output data port is busy sending data to the Quickpath Interface or Quickpath Memory Interface. Each cycle the output port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_L3", "Cycles GQ L3 output data port is busy sending data to the Last Level Cache. Each cycle the output port can transfer 32 bytes of data."},
	{"GQ_DATA.TO_CORES", "Cycles GQ Core output data port is busy sending data to the Cores. Each cycle the output port can transfer 32 bytes of data."},
	{"SNP_RESP_TO_LOCAL_HOME.I_STATE", "Number of snoop responses to the local home that L3 does not have the referenced cache line."},
	{"SNP_RESP_TO_LOCAL_HOME.S_STATE", "Number of snoop responses to the local home that L3 has the referenced line cached in the S state."},
	{"SNP_RESP_TO_LOCAL_HOME.FWD_S_STATE", "Number of responses to code or data read snoops to the local home that the L3 has the referenced cache line in the E state. The L3 cache line state is changed to the S state and the line is forwarded to the local home in the S state."},
	{"SNP_RESP_TO_LOCAL_HOME.FWD_I_STATE", "Number of responses to read invalidate snoops to the local home that the L3 has the referenced cache line in the M state. The L3 cache line state is invalidated and the line is forwarded to the local home in the M state."},
	{"SNP_RESP_TO_LOCAL_HOME.CONFLICT", "Number of conflict snoop responses sent to the local home."},
	{"SNP_RESP_TO_LOCAL_HOME.WB", "Number of responses to code or data read snoops to the local home that the L3 has the referenced line cached in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.I_STATE", "Number of snoop responses to a remote home that L3 does not have the referenced cache line."},
	{"SNP_RESP_TO_REMOTE_HOME.S_STATE", "Number of snoop responses to a remote home that L3 has the referenced line cached in the S state."},
	{"SNP_RESP_TO_REMOTE_HOME.FWD_S_STATE", "Number of responses to code or data read snoops to a remote home that the L3 has the referenced cache line in the E state. The L3 cache line state is changed to the S state and the line is forwarded to the remote home in the S state."},
	{"SNP_RESP_TO_REMOTE_HOME.FWD_I_STATE", "Number of responses to read invalidate snoops to a remote home that the L3 has the referenced cache line in the M state. The L3 cache line state is invalidated and the line is forwarded to the remote home in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.CONFLICT", "Number of conflict snoop responses sent to the local home."},
	{"SNP_RESP_TO_REMOTE_HOME.WB", "Number of responses to code or data read snoops to a remote home that the L3 has the referenced line cached in the M state."},
	{"SNP_RESP_TO_REMOTE_HOME.HITM", "Number of HITM snoop responses to a remote home."},
	{"L3_HITS.READ", "Number of code read, data read and RFO requests that hit in the L3."},
	{"L3_HITS.WRITE", "Number of writeback requests that hit in the L3. Writebacks from the cores will always result in L3 hits due to the inclusive property of the L3."},
	{"L3_HITS.PROBE", "Number of snoops from IOH or remote sockets that hit in the L3."},
	{"L3_HITS.ANY", "Number of reads and writes that hit the L3."},
	{"L3_MISS.READ", "Number of code read, data read and RFO requests that miss the L3."},
	{"L3_MISS.WRITE", "Number of writeback requests that miss the L3. Should always be zero as writebacks from the cores will always result in L3 hits due to the inclusive property of the L3."},
	{"L3_MISS.PROBE", "Number of snoops from IOH or remote sockets that miss the L3."},
	{"L3_MISS.ANY", "Number of reads and writes that miss the L3."},
	{"L3_LINES_IN.M_STATE", "Counts the number of L3 lines allocated in M state. The only time a cache line is allocated in the M state is when the line was forwarded in M state is forwarded due to a Snoop Read Invalidate Own request."},
	{"L3_LINES_IN.E_STATE", "Counts the number of L3 lines allocated in E state."},
	{"L3_LINES_IN.S_STATE", "Counts the number of L3 lines allocated in S state."},
	{"L3_LINES_IN.F_STATE", "Counts the number of L3 lines allocated in F state."},
	{"L3_LINES_IN.ANY", "Counts the number of L3 lines allocated in any state."},
	{"L3_LINES_OUT.M_STATE", "Counts the number of L3 lines victimized that were in the M state. When the victim cache line is in M state, the line is written to its home cache agent which can be either local or remote."},
	{"L3_LINES_OUT.E_STATE", "Counts the number of L3 lines victimized that were in the E state."},
	{"L3_LINES_OUT.S_STATE", "Counts the number of L3 lines victimized that were in the S state."},
	{"L3_LINES_OUT.I_STATE", "Counts the number of L3 lines victimized that were in the I state."},
	{"L3_LINES_OUT.F_STATE", "Counts the number of L3 lines victimized that were in the F state."},
	{"L3_LINES_OUT.ANY", "Counts the number of L3 lines victimized in any state."},
	{"GQ_SNOOP.GOTO_S", "Counts the number of remote snoops that have requested a cache line be set to the S state."},
	{"GQ_SNOOP.GOTO_I", "Counts the number of remote snoops that have requested a cache line be set to the I state."},
	{"GQ_SNOOP.GOTO_S_HIT", "Counts the number of remote snoops that have requested a cache line be set to the S state from E state. Requires writing MSR 301H with mask = 2H"},
	{"GQ_SNOOP.GOTO_I_HIT", "Counts the number of remote snoops that have requested a cache line be set to the S state from F (forward) state. Requires writing MSR 301H with mask = 8H"},
	{"QHL_REQUESTS.IOH_READS", "Counts number of Quickpath Home Logic read requests from the IOH."},
	{"QHL_REQUESTS.IOH_WRITES", "Counts number of Quickpath Home Logic write requests from the IOH."},
	{"QHL_REQUESTS.REMOTE_READS", "Counts number of Quickpath Home Logic read requests from a remote socket."},
	{"QHL_REQUESTS.REMOTE_WRITES", "Counts number of Quickpath Home Logic write requests from a remote socket."},
	{"QHL_REQUESTS.LOCAL_READS", "Counts number of Quickpath Home Logic read requests from the local socket."},
	{"QHL_REQUESTS.LOCAL_WRITES", "Counts number of Quickpath Home Logic write requests from the local socket."},
	{"QHL_CYCLES_FULL.IOH", "Counts uclk cycles all entries in the Quickpath Home Logic IOH are full."},
	{"QHL_CYCLES_FULL.REMOTE", "Counts uclk cycles all entries in the Quickpath Home Logic remote tracker are full."},
	{"QHL_CYCLES_FULL.LOCAL", "Counts uclk cycles all entries in the Quickpath Home Logic local tracker are full."},
	{"QHL_CYCLES_NOT_EMPTY.IOH", "Counts uclk cycles all entries in the Quickpath Home Logic IOH is busy."},
	{"QHL_CYCLES_NOT_EMPTY.REMOTE", "Counts uclk cycles all entries in the Quickpath Home Logic remote tracker is busy."},
	{"QHL_CYCLES_NOT_EMPTY.LOCAL", "Counts uclk cycles all entries in the Quickpath Home Logic local tracker is busy."},
	{"QHL_OCCUPANCY.IOH", "QHL IOH tracker allocate to deallocate read occupancy."},
	{"QHL_OCCUPANCY.REMOTE", "QHL remote tracker allocate to deallocate read occupancy."},
	{"QHL_OCCUPANCY.LOCAL", "QHL local tracker allocate to deallocate read occupancy."},
	{"QHL_ADDRESS_CONFLICTS.2WAY", "Counts number of QHL Active Address Table (AAT) entries that saw a max of 2 conflicts. The AAT is a structure that tracks requests that are in conflict. The requests themselves are in the home tracker entries. The count is reported when an AAT entry deallocates."},
	{"QHL_ADDRESS_CONFLICTS.3WAY", "Counts number of QHL Active Address Table (AAT) entries that saw a max of 3 conflicts. The AAT is a structure that tracks requests that are in conflict. The requests themselves are in the home tracker entries. The count is reported when an AAT entry deallocates."},
	{"QHL_CONFLICT_CYCLES.IOH", "Counts cycles the Quickpath Home Logic IOH Tracker contains two or more requests with an address conflict. A max of 3 requests can be in conflict."},
	{"QHL_CONFLICT_CYCLES.REMOTE", "Counts cycles the Quickpath Home Logic Remote Tracker contains two or more requests with an address conflict. A max of 3 requests can be in conflict."},
	{"QHL_CONFLICT_CYCLES.LOCAL", "Counts cycles the Quickpath Home Logic Local Tracker contains two or more requests with an address conflict. A max of 3 requests can be in conflict."},
	{"QHL_TO_QMC_BYPASS", "Counts number or requests to the Quickpath Memory Controller that bypass the Quickpath Home Logic. All local accesses can be bypassed. For remote requests, only read requests can be bypassed."},
	{"QMC_ISOC_FULL.READ.CH0", "Counts cycles all the entries in the DRAM channel 0 high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.READ.CH1", "Counts cycles all the entries in the DRAM channel 1 high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.READ.CH2", "Counts cycles all the entries in the DRAM channel 2 high priority queue are occupied with isochronous read requests."},
	{"QMC_ISOC_FULL.WRITE.CH0", "Counts cycles all the entries in the DRAM channel 0 high priority queue are occupied with isochronous write requests."},
	{"QMC_ISOC_FULL.WRITE.CH1", "Counts cycles all the entries in the DRAM channel 1 high priority queue are occupied with isochronous write requests."},
	{"QMC_ISOC_FULL.WRITE.CH2", "Counts cycles all the entries in the DRAM channel 2 high priority queue are occupied with isochronous write requests."},
	{"QMC_BUSY.READ.CH0", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding read request to DRAM channel 0."},
	{"QMC_BUSY.READ.CH1", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding read request to DRAM channel 1."},
	{"QMC_BUSY.READ.CH2", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding read request to DRAM channel 2."},
	{"QMC_BUSY.WRITE.CH0", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding write request to DRAM channel 0."},
	{"QMC_BUSY.WRITE.CH1", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding write request to DRAM channel 1."},
	{"QMC_BUSY.WRITE.CH2", "Counts cycles where Quickpath Memory Controller has at least 1 outstanding write request to DRAM channel 2."},
	{"QMC_OCCUPANCY.CH0", "IMC channel 0 normal read request occupancy."},
	{"QMC_OCCUPANCY.CH1", "IMC channel 1 normal read request occupancy."},
	{"QMC_OCCUPANCY.CH2", "IMC channel 2 normal read request occupancy."},
	{"QMC_OCCUPANCY.ANY", "Normal read request occupancy for any channel."},
	{"QMC_ISSOC_OCCUPANCY.CH0", "IMC channel 0 issoc read request occupancy."},
	{"QMC_ISSOC_OCCUPANCY.CH1", "IMC channel 1 issoc read request occupancy."},
	{"QMC_ISSOC_OCCUPANCY.CH2", "IMC channel 2 issoc read request occupancy."},
	{"QMC_ISSOC_READS.ANY", "IMC issoc read request occupancy."},
	{"QMC_NORMAL_READS.CH0", "Counts the number of Quickpath Memory Controller channel 0 medium and low priority read requests. The QMC channel 0 normal read occupancy divided by this count provides the average QMC channel 0 read latency."},
	{"QMC_NORMAL_READS.CH1", "Counts the number of Quickpath Memory Controller channel 1 medium and low priority read requests. The QMC channel 1 normal read occupancy divided by this count provides the average QMC channel 1 read latency."},
	{"QMC_NORMAL_READS.CH2", "Counts the number of Quickpath Memory Controller channel 2 medium and low priority read requests. The QMC channel 2 normal read occupancy divided by this count provides the average QMC channel 2 read latency."},
	{"QMC_NORMAL_READS.ANY", "Counts the number of Quickpath Memory Controller medium and low priority read requests. The QMC normal read occupancy divided by this count provides the average QMC read latency."},
	{"QMC_HIGH_PRIORITY_READS.CH0", "Counts the number of Quickpath Memory Controller channel 0 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.CH1", "Counts the number of Quickpath Memory Controller channel 1 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.CH2", "Counts the number of Quickpath Memory Controller channel 2 high priority isochronous read requests."},
	{"QMC_HIGH_PRIORITY_READS.ANY", "Counts the number of Quickpath Memory Controller high priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH0", "Counts the number of Quickpath Memory Controller channel 0 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH1", "Counts the number of Quickpath Memory Controller channel 1 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.CH2", "Counts the number of Quickpath Memory Controller channel 2 critical priority isochronous read requests."},
	{"QMC_CRITICAL_PRIORITY_READS.ANY", "Counts the number of Quickpath Memory Controller critical priority isochronous read requests."},
	{"QMC_WRITES.FULL.CH0", "Counts number of full cache line writes to DRAM channel 0."},
	{"QMC_WRITES.FULL.CH1", "Counts number of full cache line writes to DRAM channel 1."},
	{"QMC_WRITES.FULL.CH2", "Counts number of full cache line writes to DRAM channel 2."},
	{"QMC_WRITES.FULL.ANY", "Counts number of full cache line writes to DRAM."},
	{"QMC_WRITES.PARTIAL.CH0", "Counts number of partial cache line writes to DRAM channel 0."},
	{"QMC_WRITES.PARTIAL.CH1", "Counts number of partial cache line writes to DRAM channel 1."},
	{"QMC_WRITES.PARTIAL.CH2", "Counts number of partial cache line writes to DRAM channel 2."},
	{"QMC_WRITES.PARTIAL.ANY", "Counts number of partial cache line writes to DRAM."},
	{"QMC_CANCEL.CH0", "Counts number of DRAM channel 0 cancel requests."},
	{"QMC_CANCEL.CH1", "Counts number of DRAM channel 1 cancel requests."},
	{"QMC_CANCEL.CH2", "Counts number of DRAM channel 2 cancel requests."},
	{"QMC_CANCEL.ANY", "Counts number of DRAM cancel requests."},
	{"QMC_PRIORITY_UPDATES.CH0", "Counts number of DRAM channel 0 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.CH1", "Counts number of DRAM channel 1 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.CH2", "Counts number of DRAM channel 2 priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"QMC_PRIORITY_UPDATES.ANY", "Counts number of DRAM priority updates. A priority update occurs when an ISOC high or critical request is received by the QHL and there is a matching request with normal priority that has already been issued to the QMC. In this instance, the QHL will send a priority update to QMC to expedite the request."},
	{"IMC_RETRY.CH0", "Counts number of IMC DRAM channel 0 retries. DRAM retry only occurs when configured in RAS mode."},
	{"IMC_RETRY.CH1", "Counts number of IMC DRAM channel 1 retries. DRAM retry only occurs when configured in RAS mode."},
	{"IMC_RETRY.CH2", "Counts number of IMC DRAM channel 2 retries. DRAM retry only occurs when configured in RAS mode."},
	{"IMC_RETRY.ANY", "Counts number of IMC DRAM retries from any channel. DRAM retry only occurs when configured in RAS mode."},
	{"QHL_FRC_ACK_CNFLTS.IOH", "Counts number of Force Acknowledge Conflict messages sent by the Quickpath Home Logic to the IOH."},
	{"QHL_FRC_ACK_CNFLTS.REMOTE", "Counts number of Force Acknowledge Conflict messages sent by the Quickpath Home Logic to the remote home."},
	{"QHL_FRC_ACK_CNFLTS.LOCAL", "Counts number of Force Acknowledge Conflict messages sent by the Quickpath Home Logic to the local home."},
	{"QHL_FRC_ACK_CNFLTS.ANY", "Counts number of Force Acknowledge Conflict messages sent by the Quickpath Home Logic."},
	{"QHL_SLEEPS.IOH_ORDER", "Counts number of occurrences a request was put to sleep due to IOH ordering (write after read) conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"QHL_SLEEPS.REMOTE_ORDER", "Counts number of occurrences a request was put to sleep due to remote socket ordering (write after read) conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"QHL_SLEEPS.LOCAL_ORDER", "Counts number of occurrences a request was put to sleep due to local socket ordering (write after read) conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"QHL_SLEEPS.IOH_CONFLICT", "Counts number of occurrences a request was put to sleep due to IOH address conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"QHL_SLEEPS.REMOTE_CONFLICT", "Counts number of occurrences a request was put to sleep due to remote socket address conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"QHL_SLEEPS.LOCAL_CONFLICT", "Counts number of occurrences a request was put to sleep due to local socket address conflicts. While in the sleep state, the request is not eligible to be scheduled to the QMC."},
	{"ADDR_OPCODE_MATCH.IOH", "Counts number of requests from the IOH, address/opcode of request is qualified by mask value written to MSR 396H. The following mask values are supported: 0: NONE 40000000_00000000H:RSPFWDI 40001A00_00000000H:RSPFWDS 40001D00_00000000H:RSPIWB Match opcode/address by writing MSR 396H with mask supported mask value."},
	{"ADDR_OPCODE_MATCH.REMOTE", "Counts number of requests from the remote socket, address/opcode of request is qualified by mask value written to MSR 396H. The following mask values are supported: 0: NONE 40000000_00000000H:RSPFWDI 40001A00_00000000H:RSPFWDS 40001D00_00000000H:RSPIWB Match opcode/address by writing MSR 396H with mask supported mask value."},
	{"ADDR_OPCODE_MATCH.LOCAL", "Counts number of requests from the local socket, address/opcode of request is qualified by mask value written to MSR 396H. The following mask values are supported: 0: NONE 40000000_00000000H:RSPFWDI 40001A00_00000000H:RSPFWDS 40001D00_00000000H:RSPIWB Match opcode/address by writing MSR 396H with mask supported mask value."},
	{"QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_0", "Counts cycles the Quickpath outbound link 0 HOME virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_0", "Counts cycles the Quickpath outbound link 0 SNOOP virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_0", "Counts cycles the Quickpath outbound link 0 non-data response virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_1", "Counts cycles the Quickpath outbound link 1 HOME virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_1", "Counts cycles the Quickpath outbound link 1 SNOOP virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_1", "Counts cycles the Quickpath outbound link 1 non-data response virtual channel is stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.LINK_0", "Counts cycles the Quickpath outbound link 0 virtual channels are stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_SINGLE_FLIT.LINK_1", "Counts cycles the Quickpath outbound link 1 virtual channels are stalled due to lack of a VNA and VN0 credit. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_0", "Counts cycles the Quickpath outbound link 0 Data ResponSe virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_0", "Counts cycles the Quickpath outbound link 0 Non-Coherent Bypass virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_0", "Counts cycles the Quickpath outbound link 0 Non-Coherent Standard virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_1", "Counts cycles the Quickpath outbound link 1 Data ResponSe virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_1", "Counts cycles the Quickpath outbound link 1 Non-Coherent Bypass virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_1", "Counts cycles the Quickpath outbound link 1 Non-Coherent Standard virtual channel is stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.LINK_0", "Counts cycles the Quickpath outbound link 0 virtual channels are stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_STALLED_MULTI_FLIT.LINK_1", "Counts cycles the Quickpath outbound link 1 virtual channels are stalled due to lack of VNA and VN0 credits. Note that this event does not filter out when a flit would not have been selected for arbitration because another virtual channel is getting arbitrated."},
	{"QPI_TX_HEADER.FULL.LINK_0", "Number of cycles that the header buffer in the Quickpath Interface outbound link 0 is full."},
	{"QPI_TX_HEADER.BUSY.LINK_0", "Number of cycles that the header buffer in the Quickpath Interface outbound link 0 is busy."},
	{"QPI_TX_HEADER.FULL.LINK_1", "Number of cycles that the header buffer in the Quickpath Interface outbound link 1 is full."},
	{"QPI_TX_HEADER.BUSY.LINK_1", "Number of cycles that the header buffer in the Quickpath Interface outbound link 1 is busy."},
	{"QPI_RX_NO_PPT_CREDIT.STALLS.LINK_0", "Number of cycles that snoop packets incoming to the Quickpath Interface link0 are stalled and not sent to the GQ because the GQ Peer Probe Tracker (PPT) does not have any available entries."},
	{"QPI_RX_NO_PPT_CREDIT.STALLS.LINK_1", "Number of cycles that snoop packets incoming to the Quickpath Interface link 1 are stalled and not sent to the GQ because the GQ Peer Probe Tracker (PPT) does not have any available entries."},
	{"DRAM_OPEN.CH0", "Counts number of DRAM Channel 0 open commands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_OPEN.CH1", "Counts number of DRAM Channel 1 open commands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_OPEN.CH2", "Counts number of DRAM Channel 2 open commands issued either for read or write. To read or write data, the referenced DRAM page must first be opened."},
	{"DRAM_PAGE_CLOSE.CH0", "DRAM channel 0 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_CLOSE.CH1", "DRAM channel 1 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_CLOSE.CH2", "DRAM channel 2 command issued to CLOSE a page due to page idle timer expiration. Closing a page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH0", "Counts the number of precharges (PRE) that were issued to DRAM channel 0 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH1", "Counts the number of precharges (PRE) that were issued to DRAM channel 1 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_PAGE_MISS.CH2", "Counts the number of precharges (PRE) that were issued to DRAM channel 2 because there was a page miss. A page miss refers to a situation in which a page is currently open and another page from the same bank needs to be opened. The new page experiences a page miss. Closing of the old page is done by issuing a precharge."},
	{"DRAM_READ_CAS.CH0", "Counts the number of times a read CAS command was issued on DRAM channel 0."},
	{"DRAM_READ_CAS.AUTOPRE_CH0", "Counts the number of times a read CAS command was issued on DRAM channel 0 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_READ_CAS.CH1", "Counts the number of times a read CAS command was issued on DRAM channel 1."},
	{"DRAM_READ_CAS.AUTOPRE_CH1", "Counts the number of times a read CAS command was issued on DRAM channel 1 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_READ_CAS.CH2", "Counts the number of times a read CAS command was issued on DRAM channel 2."},
	{"DRAM_READ_CAS.AUTOPRE_CH2", "Counts the number of times a read CAS command was issued on DRAM channel 2 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH0", "Counts the number of times a write CAS command was issued on DRAM channel 0."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH0", "Counts the number of times a write CAS command was issued on DRAM channel 0 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH1", "Counts the number of times a write CAS command was issued on DRAM channel 1."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH1", "Counts the number of times a write CAS command was issued on DRAM channel 1 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_WRITE_CAS.CH2", "Counts the number of times a write CAS command was issued on DRAM channel 2."},
	{"DRAM_WRITE_CAS.AUTOPRE_CH2", "Counts the number of times a write CAS command was issued on DRAM channel 2 where the command issued used the auto-precharge (auto page close) mode."},
	{"DRAM_REFRESH.CH0", "Counts number of DRAM channel 0 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_REFRESH.CH1", "Counts number of DRAM channel 1 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_REFRESH.CH2", "Counts number of DRAM channel 2 refresh commands. DRAM loses data content over time. In order to keep correct data content, the data values have to be refreshed periodically."},
	{"DRAM_PRE_ALL.CH0", "Counts number of DRAM Channel 0 precharge-all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{"DRAM_PRE_ALL.CH1", "Counts number of DRAM Channel 1 precharge-all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{"DRAM_PRE_ALL.CH2", "Counts number of DRAM Channel 2 precharge-all (PREALL) commands that close all open pages in a rank. PREALL is issued when the DRAM needs to be refreshed or needs to go into a power down mode."},
	{"DRAM_THERMAL_THROTTLED", "Uncore cycles DRAM was throttled due to its temperature being above the thermal throttling threshold."},
	{"THERMAL_THROTTLING_TEMP.CORE_0", "Cycles that the PCU records that core 0 is above the thermal throttling threshold temperature."},
	{"THERMAL_THROTTLING_TEMP.CORE_1", "Cycles that the PCU records that core 1 is above the thermal throttling threshold temperature."},
	{"THERMAL_THROTTLING_TEMP.CORE_2", "Cycles that the PCU records that core 2 is above the thermal throttling threshold temperature."},
	{"THERMAL_THROTTLING_TEMP.CORE_3", "Cycles that the PCU records that core 3 is above the thermal throttling threshold temperature."},
	{"THERMAL_THROTTLED_TEMP.CORE_0", "Cycles that the PCU records that core 0 is in the power throttled state due to cores temperature being above the thermal throttling threshold."},
	{"THERMAL_THROTTLED_TEMP.CORE_1", "Cycles that the PCU records that core 1 is in the power throttled state due to cores temperature being above the thermal throttling threshold."},
	{"THERMAL_THROTTLED_TEMP.CORE_2", "Cycles that the PCU records that core 2 is in the power throttled state due to cores temperature being above the thermal throttling threshold."},
	{"THERMAL_THROTTLED_TEMP.CORE_3", "Cycles that the PCU records that core 3 is in the power throttled state due to cores temperature being above the thermal throttling threshold."},
	{"PROCHOT_ASSERTION", "Number of system assertions of PROCHOT indicating the entire processor has exceeded the thermal limit."},
	{"THERMAL_THROTTLING_PROCHOT.CORE_0", "Cycles that the PCU records that core 0 is a low power state due to the system asserting PROCHOT the entire processor has exceeded the thermal limit."},
	{"THERMAL_THROTTLING_PROCHOT.CORE_1", "Cycles that the PCU records that core 1 is a low power state due to the system asserting PROCHOT the entire processor has exceeded the thermal limit."},
	{"THERMAL_THROTTLING_PROCHOT.CORE_2", "Cycles that the PCU records that core 2 is a low power state due to the system asserting PROCHOT the entire processor has exceeded the thermal limit."},
	{"THERMAL_THROTTLING_PROCHOT.CORE_3", "Cycles that the PCU records that core 3 is a low power state due to the system asserting PROCHOT the entire processor has exceeded the thermal limit."},
	{"TURBO_MODE.CORE_0", "Uncore cycles that core 0 is operating in turbo mode."},
	{"TURBO_MODE.CORE_1", "Uncore cycles that core 1 is operating in turbo mode."},
	{"TURBO_MODE.CORE_2", "Uncore cycles that core 2 is operating in turbo mode."},
	{"TURBO_MODE.CORE_3", "Uncore cycles that core 3 is operating in turbo mode."},
	{"CYCLES_UNHALTED_L3_FLL_ENABLE", "Uncore cycles that at least one core is unhalted and all L3 ways are enabled."},
	{"CYCLES_UNHALTED_L3_FLL_DISABLE", "Uncore cycles that at least one core is unhalted and all L3 ways are disabled."},
	{ NULL, NULL } 
};
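
/*
 * Editor's illustrative sketch, not part of the original substrate code:
 * one way to walk the table above.  It relies only on the { NULL, NULL }
 * sentinel that terminates WestmereProcessor_info and assumes the name /
 * description fields declared for Native_Event_LabelDescription_t in map.h.
 * The function name is made up for the example.
 */
#include <string.h>		/* strcmp(); harmless if already included */

const char *
example_westmere_event_description(const char *event_name)
{
	int i;

	for (i = 0; WestmereProcessor_info[i].name != NULL; i++)
		if (strcmp(WestmereProcessor_info[i].name, event_name) == 0)
			return WestmereProcessor_info[i].description;
	return NULL;		/* event name not present in the table */
}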
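
/*
 * Editor's illustrative sketch, not part of the original substrate code:
 * per the QMC_OCCUPANCY.* and QMC_NORMAL_READS.* descriptions above, the
 * average QMC read latency is the occupancy count divided by the normal
 * read count.  Collecting the two raw counter values is left to the caller;
 * only the stated arithmetic is shown.  The function name is made up for
 * the example.
 */
#include <stdint.h>

double
example_avg_qmc_read_latency(uint64_t qmc_occupancy, uint64_t qmc_normal_reads)
{
	if (qmc_normal_reads == 0)
		return 0.0;	/* nothing sampled; avoid division by zero */
	return (double)qmc_occupancy / (double)qmc_normal_reads;
}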
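
/*
 * Editor's illustrative sketch, not part of the original substrate code:
 * the ADDR_OPCODE_MATCH.* descriptions above list the mask values supported
 * by MSR 396H.  They are collected here as named constants purely for
 * readability; the values are copied from those descriptions and the WSM_*
 * names are made up for the example.  Programming the MSR itself (for
 * example through cpuctl(4) on FreeBSD) is outside the scope of this table.
 */
#define	WSM_ADDR_OPCODE_MATCH_MSR	0x396
#define	WSM_ADDR_OPCODE_MATCH_NONE	0x0000000000000000ULL	/* no matching */
#define	WSM_ADDR_OPCODE_MATCH_RSPFWDI	0x4000000000000000ULL	/* RSPFWDI */
#define	WSM_ADDR_OPCODE_MATCH_RSPFWDS	0x40001A0000000000ULL	/* RSPFWDS */
#define	WSM_ADDR_OPCODE_MATCH_RSPIWB	0x40001D0000000000ULL	/* RSPIWB */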