From b8db85b5b5c22236d168eb03a67c2641bf7fa651 Mon Sep 17 00:00:00 2001
From: David Daney
Date: Thu, 7 Oct 2010 16:03:42 -0700
Subject: MIPS: Octeon: Update L2 Cache code for CN63XX

The CN63XX has a different L2 cache architecture.  Update the helper
functions to reflect this.  Some joining of split lines was also done
to improve readability, as well as reformatting of comments.

Signed-off-by: David Daney
Patchwork: http://patchwork.linux-mips.org/patch/1663/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/octeon/cvmx-asm.h |  11 ++
 arch/mips/include/asm/octeon/cvmx-l2c.h | 225 ++++++++++++++++++--------------
 2 files changed, 141 insertions(+), 95 deletions(-)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
index b21d3fc1ef9..5de5de95311 100644
--- a/arch/mips/include/asm/octeon/cvmx-asm.h
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -114,6 +114,17 @@
 #define CVMX_DCACHE_INVALIDATE \
         { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
 
+#define CVMX_CACHE(op, address, offset) \
+        asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
+                : : [rbase] "d" (address) )
+/* fetch and lock the state. */
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
+/* unlock the state. */
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
+/* invalidate the cache block and clear the USED bits for the block */
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
+/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
 
 #define CVMX_POP(result, input) \
         asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
 
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2a8c0902ea5..0b32c5b118e 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -26,7 +26,6 @@
 ***********************license end**************************************/
 
 /*
- *
  * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
  * facilities.
 */
 
@@ -34,93 +33,126 @@
 #ifndef __CVMX_L2C_H__
 #define __CVMX_L2C_H__
 
-/* Deprecated macro, use function */
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc()
-
-/* Deprecated macro, use function */
-#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc()          /* Deprecated macro, use function */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()        /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets()            /* Deprecated macro, use function */
 
-/* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets()
 
 #define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
 #define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
 
 /* Defines for index aliasing computations */
-#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \
-        (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
 
-#define CVMX_L2C_ALIAS_MASK \
-        (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+/* Defines for Virtualizations, valid only from Octeon II onwards. */
+#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
+#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
 
 union cvmx_l2c_tag {
         uint64_t u64;
         struct {
                 uint64_t reserved:28;
-                uint64_t V:1;   /* Line valid */
-                uint64_t D:1;   /* Line dirty */
-                uint64_t L:1;   /* Line locked */
-                uint64_t U:1;   /* Use, LRU eviction */
+                uint64_t V:1;           /* Line valid */
+                uint64_t D:1;           /* Line dirty */
+                uint64_t L:1;           /* Line locked */
+                uint64_t U:1;           /* Use, LRU eviction */
                 uint64_t addr:32;       /* Phys mem (not all bits valid) */
         } s;
 };
 
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS  1
+
 /* L2C Performance Counter events. */
 enum cvmx_l2c_event {
-        CVMX_L2C_EVENT_CYCLES = 0,
-        CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
-        CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
-        CVMX_L2C_EVENT_DATA_MISS = 3,
-        CVMX_L2C_EVENT_DATA_HIT = 4,
-        CVMX_L2C_EVENT_MISS = 5,
-        CVMX_L2C_EVENT_HIT = 6,
-        CVMX_L2C_EVENT_VICTIM_HIT = 7,
-        CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
-        CVMX_L2C_EVENT_TAG_PROBE = 9,
-        CVMX_L2C_EVENT_TAG_UPDATE = 10,
-        CVMX_L2C_EVENT_TAG_COMPLETE = 11,
-        CVMX_L2C_EVENT_TAG_DIRTY = 12,
-        CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
-        CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+        CVMX_L2C_EVENT_CYCLES = 0,
+        CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+        CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+        CVMX_L2C_EVENT_DATA_MISS = 3,
+        CVMX_L2C_EVENT_DATA_HIT = 4,
+        CVMX_L2C_EVENT_MISS = 5,
+        CVMX_L2C_EVENT_HIT = 6,
+        CVMX_L2C_EVENT_VICTIM_HIT = 7,
+        CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+        CVMX_L2C_EVENT_TAG_PROBE = 9,
+        CVMX_L2C_EVENT_TAG_UPDATE = 10,
+        CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+        CVMX_L2C_EVENT_TAG_DIRTY = 12,
+        CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+        CVMX_L2C_EVENT_DATA_STORE_READ = 14,
         CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
-        CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
-        CVMX_L2C_EVENT_WRITE_REQUEST = 17,
-        CVMX_L2C_EVENT_READ_REQUEST = 18,
+        CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+        CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+        CVMX_L2C_EVENT_READ_REQUEST = 18,
         CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
-        CVMX_L2C_EVENT_XMC_NOP = 20,
-        CVMX_L2C_EVENT_XMC_LDT = 21,
-        CVMX_L2C_EVENT_XMC_LDI = 22,
-        CVMX_L2C_EVENT_XMC_LDD = 23,
-        CVMX_L2C_EVENT_XMC_STF = 24,
-        CVMX_L2C_EVENT_XMC_STT = 25,
-        CVMX_L2C_EVENT_XMC_STP = 26,
-        CVMX_L2C_EVENT_XMC_STC = 27,
-        CVMX_L2C_EVENT_XMC_DWB = 28,
-        CVMX_L2C_EVENT_XMC_PL2 = 29,
-        CVMX_L2C_EVENT_XMC_PSL1 = 30,
-        CVMX_L2C_EVENT_XMC_IOBLD = 31,
-        CVMX_L2C_EVENT_XMC_IOBST = 32,
-        CVMX_L2C_EVENT_XMC_IOBDMA = 33,
-        CVMX_L2C_EVENT_XMC_IOBRSP = 34,
-        CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
-        CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
-        CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
-        CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
-        CVMX_L2C_EVENT_RSC_NOP = 39,
-        CVMX_L2C_EVENT_RSC_STDN = 40,
-        CVMX_L2C_EVENT_RSC_FILL = 41,
-        CVMX_L2C_EVENT_RSC_REFL = 42,
-        CVMX_L2C_EVENT_RSC_STIN = 43,
-        CVMX_L2C_EVENT_RSC_SCIN = 44,
-        CVMX_L2C_EVENT_RSC_SCFL = 45,
-        CVMX_L2C_EVENT_RSC_SCDN = 46,
-        CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
-        CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
-        CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
-        CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
-        CVMX_L2C_EVENT_LRF_REQ = 51,
-        CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
-        CVMX_L2C_EVENT_DT_WR_INVAL = 53
+        CVMX_L2C_EVENT_XMC_NOP = 20,
+        CVMX_L2C_EVENT_XMC_LDT = 21,
+        CVMX_L2C_EVENT_XMC_LDI = 22,
+        CVMX_L2C_EVENT_XMC_LDD = 23,
+        CVMX_L2C_EVENT_XMC_STF = 24,
+        CVMX_L2C_EVENT_XMC_STT = 25,
+        CVMX_L2C_EVENT_XMC_STP = 26,
+        CVMX_L2C_EVENT_XMC_STC = 27,
+        CVMX_L2C_EVENT_XMC_DWB = 28,
+        CVMX_L2C_EVENT_XMC_PL2 = 29,
+        CVMX_L2C_EVENT_XMC_PSL1 = 30,
+        CVMX_L2C_EVENT_XMC_IOBLD = 31,
+        CVMX_L2C_EVENT_XMC_IOBST = 32,
+        CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+        CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+        CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+        CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+        CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+        CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+        CVMX_L2C_EVENT_RSC_NOP = 39,
+        CVMX_L2C_EVENT_RSC_STDN = 40,
+        CVMX_L2C_EVENT_RSC_FILL = 41,
+        CVMX_L2C_EVENT_RSC_REFL = 42,
+        CVMX_L2C_EVENT_RSC_STIN = 43,
+        CVMX_L2C_EVENT_RSC_SCIN = 44,
+        CVMX_L2C_EVENT_RSC_SCFL = 45,
+        CVMX_L2C_EVENT_RSC_SCDN = 46,
+        CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+        CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+        CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+        CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+        CVMX_L2C_EVENT_LRF_REQ = 51,
+        CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+        CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+        CVMX_L2C_EVENT_MAX
+};
+
+/* L2C Performance Counter events for Octeon2. */
+enum cvmx_l2c_tad_event {
+        CVMX_L2C_TAD_EVENT_NONE = 0,
+        CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+        CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+        CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+        CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+        CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+        CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+        CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+        CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+        CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+        CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+        CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+        CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+        CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+        CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+        CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+        CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+        CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+        CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+        CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+        CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+        CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+        CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+        CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+        CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+        CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+        CVMX_L2C_TAD_EVENT_MAX
 };
 
 /**
@@ -132,10 +164,10 @@ enum cvmx_l2c_event {
  * @clear_on_read: When asserted, any read of the performance counter
  *                 clears the counter.
  *
- * The routine does not clear the counter.
+ * @note The routine does not clear the counter.
  */
-void cvmx_l2c_config_perf(uint32_t counter,
-                          enum cvmx_l2c_event event, uint32_t clear_on_read);
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
+
 /**
  * Read the given L2 Cache performance counter. The counter must be configured
  * before reading, but this routine does not enforce this requirement.
@@ -160,18 +192,18 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
 
 /**
  * Partitions the L2 cache for a core
  *
- * @core: The core that the partitioning applies to.
+ * @core: The core that the partitioning applies to.
+ * @mask: The partitioning of the ways expressed as a binary
+ *        mask. A 0 bit allows the core to evict cache lines from
+ *        a way, while a 1 bit blocks the core from evicting any
+ *        lines from that way. There must be at least one allowed
+ *        way (0 bit) in the mask.
  *
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- *        bit allows the core to evict cache lines from a way, while a
- *        1 bit blocks the core from evicting any lines from that
- *        way. There must be at least one allowed way (0 bit) in the
- *        mask.
-
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ *       those ways will never have any cache lines evicted from them.
+ *       All cores and the hardware blocks are free to read from all
+ *       ways regardless of the partitioning.
  */
 int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
 
@@ -187,19 +219,21 @@ int cvmx_l2c_get_hw_way_partition(void);
 
 /**
  * Partitions the L2 cache for the hardware blocks.
  *
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- *        bit allows the core to evict cache lines from a way, while a
- *        1 bit blocks the core from evicting any lines from that
- *        way. There must be at least one allowed way (0 bit) in the
- *        mask.
+ * @mask: The partitioning of the ways expressed as a binary
+ *        mask. A 0 bit allows the core to evict cache lines from
+ *        a way, while a 1 bit blocks the core from evicting any
+ *        lines from that way. There must be at least one allowed
+ *        way (0 bit) in the mask.
  *
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ *       those ways will never have any cache lines evicted from them.
+ *       All cores and the hardware blocks are free to read from all
+ *       ways regardless of the partitioning.
  */
 int cvmx_l2c_set_hw_way_partition(uint32_t mask);
+
 /**
  * Locks a line in the L2 cache at the specified physical address
  *
@@ -263,13 +297,14 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
  */
 union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
 
-/* Wrapper around deprecated old function name */
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
-                                                  uint32_t index)
+/* Wrapper providing a deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
 {
         return cvmx_l2c_get_tag(association, index);
 }
+
 /**
  * Returns the cache index for a given physical address
  *
--
cgit v1.2.3
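
For reference, a minimal sketch of how the CVMX_CACHE_LCKL2/CVMX_CACHE_WBIL2 wrappers added to cvmx-asm.h might be exercised to pin one 128-byte block in L2 and later release it. The helper names below are illustrative only and not part of the patch; the op values follow the comments in the hunk above (31 fetches and locks, 23 writes back and unlocks), and <asm/octeon/cvmx.h> is assumed to pull in the cvmx-asm.h definitions.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-asm.h>

/* Fetch one 128-byte block into L2 and set its lock bit (cache op 31). */
static inline void example_l2_pin_block(volatile void *addr)
{
        CVMX_CACHE_LCKL2(addr, 0);
}

/* Write the block back and clear its lock bit (cache op 23). */
static inline void example_l2_release_block(volatile void *addr)
{
        CVMX_CACHE_WBIL2(addr, 0);
}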
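Similarly, a sketch of how the cvmx_l2c_tag bit-fields shown above can be inspected, assuming only the accessors already declared in this header (cvmx_l2c_get_tag() and cvmx_l2c_get_num_assoc()); the function name is hypothetical.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>

/* Count the valid, locked ways at one cache index. */
static int example_count_locked_ways(uint32_t index)
{
        int way, locked = 0;

        for (way = 0; way < cvmx_l2c_get_num_assoc(); way++) {
                union cvmx_l2c_tag tag = cvmx_l2c_get_tag(way, index);

                if (tag.s.V && tag.s.L)
                        locked++;
        }
        return locked;
}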
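The reworked cvmx_l2c_config_perf() prototype and the way-partition helpers could be used along the following lines. This is a sketch only: cvmx_l2c_read_perf() is assumed to be the counter-read helper declared elsewhere in this header (its declaration does not appear in the hunks above), and the mask value is just an example.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>

static uint64_t example_measure_l2_hits(void)
{
        /* Counter 0 counts total L2 hits; do not clear the counter on read. */
        cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_HIT, 0);

        /* ... run the workload of interest ... */

        return cvmx_l2c_read_perf(0);   /* assumed read helper */
}

static void example_partition_core0(void)
{
        /*
         * A 1 bit blocks core 0 from evicting lines in that way; at least
         * one way must stay allowed (0 bit), as the comment above requires.
         * Here core 0 is kept out of way 0 only.
         */
        cvmx_l2c_set_core_way_partition(0, 0x1);
}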
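Finally, the last hunk's cvmx_get_l2c_tag() wrapper shows a common pattern: keep the old function name compiling but warn its callers, by declaring it with __attribute__((deprecated)) and then supplying the body separately. A self-contained sketch of the same pattern, with hypothetical names:

static inline int cvmx_example_new_api(int x)
{
        return x + 1;
}

/* Old spelling kept for source compatibility; callers get a compile-time warning. */
static inline int cvmx_example_old_api(int x) __attribute__((deprecated));
static inline int cvmx_example_old_api(int x)
{
        return cvmx_example_new_api(x);
}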