author    Yanan Wang <wangyanan55@huawei.com>    2021-01-14 20:13:50 +0800
committer Marc Zyngier <maz@kernel.org>          2021-01-25 16:30:20 +0000
commit    509552e65ae8287178a5cdea2d734dcd2d6380ab (patch)
tree      f58f24f7a80c8a7de5fb9cfc8dfa4d33eb9252dd /arch/arm64/kvm/mmu.c
parent    694d071f8d85d504055540a27f0dbe9dbf44584e (diff)
KVM: arm64: Mark the page dirty only if the fault is handled successfully
We currently set the pfn dirty and mark the page dirty before calling the
fault handlers in user_mem_abort(), so we might end up with spurious dirty
pages if the update of the permissions or the mapping fails. Let's move
these two operations after the fault handlers; they will then be performed
only if the fault has been handled successfully.

When the map handler returns -EAGAIN, we want the vcpu to re-enter the
guest directly instead of exiting back to userspace, so adjust the return
value at the end of the function accordingly.

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210114121350.123684-4-wangyanan55@huawei.com
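The net effect is easiest to see in a condensed sketch of the tail of
user_mem_abort() after this patch. This is a paraphrase, not the verbatim
kernel source: the permission-fault condition is simplified, and the
surrounding locals (pgt, prot, memcache) are assumed from context.

        /* Install or adjust the stage-2 mapping; ret is 0 on success. */
        if (fault_status == FSC_PERM) /* simplified: the real check also considers the page size */
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);

        /* Only a successfully handled write fault dirties the page now. */
        if (writable && !ret) {
                kvm_set_pfn_dirty(pfn);
                mark_page_dirty(kvm, gfn);
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);

        /* -EAGAIN only means "retry the fault", so report success. */
        return ret != -EAGAIN ? ret : 0;

Since -EAGAIN from the map handler indicates a transient condition rather
than a real failure, converting it to 0 lets the vcpu re-enter the guest
and retry the access instead of exiting to userspace.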
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--  arch/arm64/kvm/mmu.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7d2257cc5438..77cb2d28f2a4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (vma_pagesize == PAGE_SIZE && !force_pte)
                 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
                                                            &pfn, &fault_ipa);
-        if (writable) {
+        if (writable)
                 prot |= KVM_PGTABLE_PROT_W;
-                kvm_set_pfn_dirty(pfn);
-                mark_page_dirty(kvm, gfn);
-        }
 
         if (fault_status != FSC_PERM && !device)
                 clean_dcache_guest_page(pfn, vma_pagesize);
@@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                              memcache);
         }
 
+        /* Mark the page dirty only if the fault is handled successfully */
+        if (writable && !ret) {
+                kvm_set_pfn_dirty(pfn);
+                mark_page_dirty(kvm, gfn);
+        }
+
 out_unlock:
         spin_unlock(&kvm->mmu_lock);
         kvm_set_pfn_accessed(pfn);
         kvm_release_pfn_clean(pfn);
-        return ret;
+        return ret != -EAGAIN ? ret : 0;
 }
 
 /* Resolve the access fault by making the page young again. */