author     Richard Henderson <richard.henderson@linaro.org>   2021-07-25 12:06:49 -1000
committer  Richard Henderson <richard.henderson@linaro.org>   2021-10-05 16:53:17 -0700
commit     9002ffcb7264947d9a193567b457dea42f15c321 (patch)
tree       e2cb1ee1ad3036f4e6f7472d2411422536bd1fb7 /accel
parent     4b473e0c60d802bb69accab3177d350fc580e2a4 (diff)
tcg: Rename TCGMemOpIdx to MemOpIdx
We're about to move this out of tcg.h, so rename it as we did when
moving MemOp.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
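
For reference, a minimal sketch of what a MemOpIdx encodes, written against the accessors that appear throughout this diff (make_memop_idx, get_memop, get_mmuidx). The typedef and bit layout shown here are assumptions for illustration only, not part of this patch:

    /* Minimal sketch, not part of this patch: a MemOpIdx packs a MemOp
     * together with an MMU index.  The low-4-bit layout below is an
     * assumption; only the accessor names are taken from the diff. */
    #include <stdint.h>
    #include "exec/memop.h"            /* MemOp, MO_* constants */

    typedef uint32_t MemOpIdx;

    static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
    {
        return (op << 4) | idx;        /* mmu_idx kept in the low 4 bits */
    }

    static inline MemOp get_memop(MemOpIdx oi)
    {
        return oi >> 4;
    }

    static inline unsigned get_mmuidx(MemOpIdx oi)
    {
        return oi & 15;
    }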
Diffstat (limited to 'accel')
-rw-r--r--   accel/tcg/atomic_common.c.inc |  6
-rw-r--r--   accel/tcg/atomic_template.h   | 24
-rw-r--r--   accel/tcg/cputlb.c            | 78
-rw-r--r--   accel/tcg/user-exec.c         |  2
4 files changed, 55 insertions(+), 55 deletions(-)
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 6c0339f610..ebaa793464 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -14,7 +14,7 @@
*/
static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi)
+ MemOpIdx oi)
{
CPUState *cpu = env_cpu(env);
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
@@ -34,7 +34,7 @@ static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
#if HAVE_ATOMIC128
static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi)
+ MemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
@@ -50,7 +50,7 @@ static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
}
static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi)
+ MemOpIdx oi)
{
uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 8098a1be31..4230ff2957 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -72,7 +72,7 @@
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
@@ -92,7 +92,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
@@ -106,7 +106,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
@@ -119,7 +119,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
@@ -134,7 +134,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
+ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
@@ -167,7 +167,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
+ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
@@ -211,7 +211,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
@@ -231,7 +231,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ, retaddr);
@@ -245,7 +245,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
}
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_WRITE, retaddr);
@@ -259,7 +259,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
PAGE_READ | PAGE_WRITE, retaddr);
@@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
+ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
@@ -304,7 +304,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
- ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
+ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
PAGE_READ | PAGE_WRITE, retaddr); \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 0a1fdbefdd..d72f65f42b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1749,7 +1749,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, int size, int prot,
+ MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
@@ -1850,7 +1850,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
*/
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
+ MemOpIdx oi, uintptr_t retaddr);
static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
@@ -1876,7 +1876,7 @@ load_memop(const void *haddr, MemOp op)
}
static inline uint64_t QEMU_ALWAYS_INLINE
-load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
@@ -1991,78 +1991,78 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
*/
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_ldul_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
@@ -2075,31 +2075,31 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
@@ -2113,7 +2113,7 @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
MemOp op, FullLoadHelper *full_load)
{
uint16_t meminfo;
- TCGMemOpIdx oi;
+ MemOpIdx oi;
uint64_t ret;
meminfo = trace_mem_get_info(op, mmu_idx, false);
@@ -2337,7 +2337,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t index, index2;
CPUTLBEntry *entry, *entry2;
target_ulong page2, tlb_addr, tlb_addr2;
- TCGMemOpIdx oi;
+ MemOpIdx oi;
size_t size2;
int i;
@@ -2404,7 +2404,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+ MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -2502,43 +2502,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
@@ -2551,7 +2551,7 @@ static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr, MemOp op)
{
- TCGMemOpIdx oi;
+ MemOpIdx oi;
uint16_t meminfo;
meminfo = trace_mem_get_info(op, mmu_idx, true);
@@ -2717,49 +2717,49 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return full_ldub_code(env, addr, oi, 0);
}
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return full_lduw_code(env, addr, oi, 0);
}
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return full_ldl_code(env, addr, oi, 0);
}
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 8f2644f26e..1187362a4c 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1228,7 +1228,7 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
* @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, int size, int prot,
+ MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
/* Enforce qemu required alignment. */
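
For a sense of how call sites read after the rename, a hedged usage sketch modeled on the cpu_lduw_code hunk in cputlb.c above. The wrapper name example_lduw_code is hypothetical; make_memop_idx, cpu_mmu_index, get_mmuidx and full_lduw_code are the helpers visible in the hunks above:

    /* Illustrative only: the wrapper name is made up; the helpers it
     * calls are those touched by the cputlb.c hunks above. */
    static uint32_t example_lduw_code(CPUArchState *env, abi_ptr addr)
    {
        /* Pack the memory op and the code-fetch MMU index into one value. */
        MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));

        /* Both halves remain recoverable from the packed index. */
        assert(get_mmuidx(oi) == cpu_mmu_index(env, true));

        return full_lduw_code(env, addr, oi, 0);
    }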