author    Richard Henderson <richard.henderson@linaro.org>    2023-02-22 19:17:52 -1000
committer Richard Henderson <richard.henderson@linaro.org>    2023-06-26 17:33:00 +0200
commit    58e8f1f616d117aed6283690419dc16f53b7a202 (patch)
tree      51c033440c2b72c4ca9d72fc868371257909063c /accel
parent    97e1576957c28a5fbcc810f92643e52069cc49b7 (diff)
accel/tcg: Store some tlb flags in CPUTLBEntryFull
We have run out of bits we can use within the CPUTLBEntry comparators,
as TLB_FLAGS_MASK cannot overlap alignment.

Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from
the comparator.  A new TLB_FORCE_SLOW bit is set within the comparator
as an indication that the slow path must be used.

Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK.  Since we are out of bits, we
cannot create a new bit without moving an old one.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
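
For orientation, a minimal self-contained sketch of the scheme before the diff itself. The mask values, the Sketch* struct names and the sketch_* helpers are illustrative placeholders, not QEMU's definitions; only the macro names TLB_FLAGS_MASK, TLB_SLOW_FLAGS_MASK and TLB_FORCE_SLOW are taken from the patch, and the only relationship relied on is the one the patch establishes (TLB_FORCE_SLOW lives inside TLB_FLAGS_MASK, the slow flags live outside the comparator):

#include <stdint.h>
#include <stdbool.h>

/* Placeholder values; the real masks live in QEMU's headers. */
#define TLB_FLAGS_MASK       0x3full   /* flags kept in the comparator */
#define TLB_FORCE_SLOW       0x20ull   /* "slow flags present" marker */
#define TLB_SLOW_FLAGS_MASK  0xc0ull   /* flags evicted from the comparator */

enum { SK_MMU_DATA_LOAD, SK_MMU_DATA_STORE, SK_MMU_INST_FETCH, SK_MMU_ACCESS_COUNT };

typedef struct {
    uint64_t addr_idx[SK_MMU_ACCESS_COUNT];   /* comparators: page | fast flags */
} SketchTLBEntry;

typedef struct {
    uint64_t slow_flags[SK_MMU_ACCESS_COUNT]; /* flags that no longer fit above */
} SketchTLBEntryFull;

/* Fill side, mirroring the new tlb_set_compare(): fast flags go into the
 * comparator, slow flags into the Full entry, and TLB_FORCE_SLOW records
 * that slow flags exist for this access type. */
static void sketch_set_compare(SketchTLBEntryFull *full, SketchTLBEntry *ent,
                               uint64_t page_addr, uint64_t flags,
                               int access_type, bool enable)
{
    uint64_t cmp, slow;

    if (enable) {
        cmp = page_addr | (flags & TLB_FLAGS_MASK);
        slow = flags & TLB_SLOW_FLAGS_MASK;
        if (slow) {
            cmp |= TLB_FORCE_SLOW;
        }
    } else {
        cmp = -1;
        slow = 0;
    }
    ent->addr_idx[access_type] = cmp;
    full->slow_flags[access_type] = slow;
}

/* Lookup side, mirroring probe_access_internal()/mmu_lookup1(): drop
 * TLB_FORCE_SLOW itself and OR the slow flags back in, so callers see
 * the complete flag set again. */
static uint64_t sketch_full_flags(const SketchTLBEntry *ent,
                                  const SketchTLBEntryFull *full,
                                  int access_type)
{
    uint64_t flags = ent->addr_idx[access_type] & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
    return flags | full->slow_flags[access_type];
}

The fill side corresponds to the new tlb_set_compare() helper below; the lookup side corresponds to the changes in probe_access_internal() and mmu_lookup1() further down.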
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  96
1 file changed, 57 insertions(+), 39 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 5666a8e23a..3671846744 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1107,6 +1107,24 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
+static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
+ target_ulong address, int flags,
+ MMUAccessType access_type, bool enable)
+{
+ if (enable) {
+ address |= flags & TLB_FLAGS_MASK;
+ flags &= TLB_SLOW_FLAGS_MASK;
+ if (flags) {
+ address |= TLB_FORCE_SLOW;
+ }
+ } else {
+ address = -1;
+ flags = 0;
+ }
+ ent->addr_idx[access_type] = address;
+ full->slow_flags[access_type] = flags;
+}
+
/*
* Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
@@ -1122,9 +1140,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
CPUTLB *tlb = env_tlb(env);
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
- unsigned int index;
- vaddr address;
- vaddr write_address;
+ unsigned int index, read_flags, write_flags;
uintptr_t addend;
CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page;
@@ -1153,13 +1169,13 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
" prot=%x idx=%d\n",
addr, full->phys_addr, prot, mmu_idx);
- address = addr_page;
+ read_flags = 0;
if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
- address |= TLB_INVALID_MASK;
+ read_flags |= TLB_INVALID_MASK;
}
if (full->attrs.byte_swap) {
- address |= TLB_BSWAP;
+ read_flags |= TLB_BSWAP;
}
is_ram = memory_region_is_ram(section->mr);
@@ -1173,7 +1189,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
addend = 0;
}
- write_address = address;
+ write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
/*
@@ -1182,9 +1198,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
*/
if (prot & PAGE_WRITE) {
if (section->readonly) {
- write_address |= TLB_DISCARD_WRITE;
+ write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) {
- write_address |= TLB_NOTDIRTY;
+ write_flags |= TLB_NOTDIRTY;
}
}
} else {
@@ -1195,9 +1211,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* Reads to romd devices go through the ram_ptr found above,
* but of course reads to I/O must go through MMIO.
*/
- write_address |= TLB_MMIO;
+ write_flags |= TLB_MMIO;
if (!is_romd) {
- address = write_address;
+ read_flags = write_flags;
}
}
@@ -1242,7 +1258,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* TARGET_PAGE_BITS, and either
* + the ram_addr_t of the page base of the target RAM (RAM)
* + the offset within section->mr of the page base (I/O, ROMD)
- * We subtract the vaddr_page (which is page aligned and thus won't
+ * We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we
@@ -1250,36 +1266,30 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
desc->fulltlb[index] = *full;
- desc->fulltlb[index].xlat_section = iotlb - addr_page;
- desc->fulltlb[index].phys_addr = paddr_page;
+ full = &desc->fulltlb[index];
+ full->xlat_section = iotlb - addr_page;
+ full->phys_addr = paddr_page;
/* Now calculate the new entry */
tn.addend = addend - addr_page;
- if (prot & PAGE_READ) {
- tn.addr_read = address;
- if (wp_flags & BP_MEM_READ) {
- tn.addr_read |= TLB_WATCHPOINT;
- }
- } else {
- tn.addr_read = -1;
- }
- if (prot & PAGE_EXEC) {
- tn.addr_code = address;
- } else {
- tn.addr_code = -1;
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_INST_FETCH, prot & PAGE_EXEC);
+
+ if (wp_flags & BP_MEM_READ) {
+ read_flags |= TLB_WATCHPOINT;
}
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_DATA_LOAD, prot & PAGE_READ);
- tn.addr_write = -1;
- if (prot & PAGE_WRITE) {
- tn.addr_write = write_address;
- if (prot & PAGE_WRITE_INV) {
- tn.addr_write |= TLB_INVALID_MASK;
- }
- if (wp_flags & BP_MEM_WRITE) {
- tn.addr_write |= TLB_WATCHPOINT;
- }
+ if (prot & PAGE_WRITE_INV) {
+ write_flags |= TLB_INVALID_MASK;
+ }
+ if (wp_flags & BP_MEM_WRITE) {
+ write_flags |= TLB_WATCHPOINT;
}
+ tlb_set_compare(full, &tn, addr_page, write_flags,
+ MMU_DATA_STORE, prot & PAGE_WRITE);
copy_tlb_helper_locked(te, &tn);
tlb_n_used_entries_inc(env, mmu_idx);
@@ -1509,7 +1519,8 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
vaddr page_addr = addr & TARGET_PAGE_MASK;
- int flags = TLB_FLAGS_MASK;
+ int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+ CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
@@ -1538,7 +1549,8 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
}
flags &= tlb_addr;
- *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1761,6 +1773,8 @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
+ CPUTLBEntryFull *full;
+ int flags;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
@@ -1774,8 +1788,12 @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
- data->flags = tlb_addr & TLB_FLAGS_MASK;
- data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+ flags |= full->slow_flags[access_type];
+
+ data->full = full;
+ data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
data->haddr = (void *)((uintptr_t)addr + entry->addend);
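
One point the diff does not spell out: why TLB_FORCE_SLOW must occupy a comparator bit at all, given that the lookups above merge slow_flags[] anyway. The TCG-generated inline fast path compares the guest address against the comparator word alone and never reads CPUTLBEntryFull, so a page whose only special flags are slow ones (e.g. TLB_BSWAP after this patch) would otherwise hit inline and bypass the C helpers entirely. Any flag bit left set in the comparator, TLB_FORCE_SLOW included, makes that inline comparison fail and funnels the access into the paths shown above. A sketch of the shape of that inline test, with an illustrative helper name and mask parameter (not QEMU code):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative only.  cmp_mask covers the page bits plus the access's
 * alignment bits; the flag bits below them are cleared from the masked
 * address, so any flag left set in the comparator guarantees a mismatch
 * and a call into the slow-path helpers, where slow_flags[] is applied. */
static bool sketch_inline_tlb_hit(uint64_t guest_addr, uint64_t comparator,
                                  uint64_t cmp_mask)
{
    return (guest_addr & cmp_mask) == comparator;
}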