author     Ilya Leoshkevich <iii@linux.ibm.com>              2022-07-11 20:56:38 +0200
committer  Richard Henderson <richard.henderson@linaro.org>  2022-07-12 10:43:33 +0530
commit     b0f650f0477ae775e0915e3d60ab5110ad5e9157 (patch)
tree       ae709274082e661508bfa4ea9879c702ba6b5d02 /accel
parent     ba8924113ca459d02ee67656a713f7ff494b968e (diff)
accel/tcg: Fix unaligned stores to s390x low-address-protected lowcore
If low-address-protection is active, unaligned stores to non-protected
parts of lowcore lead to protection exceptions. The reason is that in
such cases the tlb_fill() call in store_helper_unaligned() covers the
[0, addr + size) range, which contains the protected portion of
lowcore. This range is too large.

The most straightforward fix would be to make sure we stay within the
original [addr, addr + size) range. However, if an unaligned access
affects only a single page, we don't need to call tlb_fill() in
store_helper_unaligned() at all, since it would be identical to the
previous tlb_fill() call in store_helper(), and therefore a no-op. If
an unaligned access covers multiple pages, this situation does not
occur. Therefore, simply skip TLB handling in store_helper_unaligned()
if we are dealing with a single page.

Fixes: 2bcf018340cb ("s390x/tcg: low-address protection support")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20220711185640.3558813-2-iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
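To make the single-page check concrete, here is a minimal standalone C sketch of the page comparison the patch introduces. It is not part of the commit: the 4 KiB PAGE_MASK, the needs_second_tlb_fill() helper and the main() driver are invented for illustration and are not QEMU identifiers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 4 KiB page size; QEMU uses TARGET_PAGE_MASK for this. */
#define PAGE_MASK (~(uint64_t)0xfff)

/*
 * Mirrors the patch's check: a second tlb_fill() is needed only when the
 * page of the first byte differs from the page computed from addr + size,
 * i.e. when the unaligned store actually spills into a second page.
 */
static bool needs_second_tlb_fill(uint64_t addr, unsigned size)
{
    uint64_t page1 = addr & PAGE_MASK;
    uint64_t page2 = (addr + size) & PAGE_MASK;

    return page1 != page2;
}

int main(void)
{
    /* Unaligned 8-byte store fully inside one page: prints 0 (fill skipped). */
    printf("%d\n", needs_second_tlb_fill(0x1001, 8));
    /* Unaligned 8-byte store crossing a page boundary: prints 1 (fill page 2). */
    printf("%d\n", needs_second_tlb_fill(0x1ffc, 8));
    return 0;
}

Under these assumptions, a single-page unaligned store skips the second fill entirely, so the probe no longer covers [0, addr + size) and therefore no longer touches the protected part of lowcore; a page-crossing store still fills the second page as before.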
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  8
1 file changed, 5 insertions, 3 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f90f4312ea..a46f3a654d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2248,7 +2248,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
     uintptr_t index, index2;
     CPUTLBEntry *entry, *entry2;
-    target_ulong page2, tlb_addr, tlb_addr2;
+    target_ulong page1, page2, tlb_addr, tlb_addr2;
     MemOpIdx oi;
     size_t size2;
     int i;
@@ -2256,15 +2256,17 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
     /*
      * Ensure the second page is in the TLB. Note that the first page
      * is already guaranteed to be filled, and that the second page
-     * cannot evict the first.
+     * cannot evict the first. An exception to this rule is PAGE_WRITE_INV
+     * handling: the first page could have evicted itself.
      */
+    page1 = addr & TARGET_PAGE_MASK;
     page2 = (addr + size) & TARGET_PAGE_MASK;
     size2 = (addr + size) & ~TARGET_PAGE_MASK;
     index2 = tlb_index(env, mmu_idx, page2);
     entry2 = tlb_entry(env, mmu_idx, page2);
     tlb_addr2 = tlb_addr_write(entry2);
-    if (!tlb_hit_page(tlb_addr2, page2)) {
+    if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
         if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
             tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                      mmu_idx, retaddr);