author     Marc Zyngier <marc.zyngier@arm.com>      2015-05-04 09:25:21 +0800
committer  Sasha Levin <sasha.levin@oracle.com>     2015-05-11 07:07:35 -0400
commit     a412dc06d66e6f32269b3825221dc323dcc2ede0 (patch)
tree       a2b5ddc440b40128ab18ae55d66504d73d1a5eaf /arch/arm64/include
parent     cc0c5f43df62402d1c283f047634e4672f7cb1d8 (diff)
arm/arm64: KVM: Invalidate data cache on unmap
commit 363ef89f8e9bcedc28b976d0fe2d858fe139c122 upstream.

Let's assume a guest has created an uncached mapping and written to that
page. Let's also assume that the host uses a cache-coherent IO subsystem.
Let's finally assume that the host is under memory pressure and starts to
swap things out.

Before this "uncached" page is evicted, we need to make sure we invalidate
potentially speculated, clean cache lines that are sitting there, or the IO
subsystem is going to swap out the cached view, losing the data that has
been written directly into memory.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
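For context on what the new helpers below ultimately do: on arm64,
kvm_flush_dcache_to_poc() is defined earlier in this same header as a wrapper
around __flush_dcache_area(), which cleans and invalidates the range to the
Point of Coherency. The sketch below is illustrative only, not the kernel's
implementation: it hard-codes a 64-byte line size (the real code derives it
from CTR_EL0) and uses a made-up function name.

/*
 * Illustrative only: roughly what kvm_flush_dcache_to_poc() boils down to
 * on arm64 (clean+invalidate by VA to the Point of Coherency).  The real
 * __flush_dcache_area() reads the line size from CTR_EL0; 64 bytes is
 * assumed here, and the function name is made up.
 */
static inline void dcache_civac_range_sketch(void *addr, unsigned long size)
{
	unsigned long line = 64;
	unsigned long cur = (unsigned long)addr & ~(line - 1);
	unsigned long end = (unsigned long)addr + size;

	for (; cur < end; cur += line)
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	asm volatile("dsb sy" : : : "memory");
}

The invalidate half of "dc civac" is what matters for this patch: it discards
clean lines the CPU may have speculatively allocated over the guest's uncached
mapping, so a cache-coherent IO master swapping the page out reads what the
guest actually wrote to memory.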
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..ea1bca28593d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -258,6 +258,24 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
void stage2_flush_vm(struct kvm *kvm);
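Not part of this diff (which is limited to arch/arm64/include), but for
orientation: the shared arm/arm64 stage-2 unmap code in mmu.c is expected to
call these helpers after clearing the entry and invalidating the TLB, so the
page is clean of stale cache lines before it is handed back to the host. A
rough sketch of that call pattern, with identifiers treated as illustrative
rather than taken from this diff, could look like:

/*
 * Illustrative sketch of the expected caller in the stage-2 unmap path;
 * names follow the arm/arm64 KVM MMU code of this era but are not part
 * of this patch.
 */
static void unmap_stage2_pte_sketch(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	pte_t old_pte = *pte;

	kvm_set_pte(pte, __pte(0));		/* clear the stage-2 entry     */
	kvm_tlb_flush_vmid_ipa(kvm, addr);	/* drop any cached translation */

	/* Device mappings are never cached, so no maintenance is needed there. */
	if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
		__kvm_flush_dcache_pte(old_pte);	/* invalidate to PoC */

	put_page(virt_to_page(pte));
}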