From fe769e6c1f80f542d6f4e7f7c8c6bf20c1307f99 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 27 Jun 2023 15:05:57 +0100
Subject: KVM: arm64: timers: Use CNTHCTL_EL2 when setting non-CNTKCTL_EL1 bits

It recently appeared that, when running VHE, there is a notable
difference between using CNTKCTL_EL1 and CNTHCTL_EL2, despite what the
architecture documents:

- When accessed from EL2, bits [19:18] and [16:10] of CNTKCTL_EL1 have
  the same assignment as CNTHCTL_EL2

- When accessed from EL1, bits [19:18] and [16:10] are RES0

It is all OK, until you factor in NV, where the EL2 guest runs at EL1.
In this configuration, CNTKCTL_EL1 doesn't trap, nor does it end up in
the VNCR page. This means that any write from the guest affecting
CNTHCTL_EL2 using CNTKCTL_EL1 ends up losing some state. Not good.

The fix is obvious: don't use CNTKCTL_EL1 if you want to change bits
that are not part of the EL1 definition of CNTKCTL_EL1, and use
CNTHCTL_EL2 instead. This doesn't change anything for a bare-metal OS,
and fixes it when running under NV. The NV hypervisor will itself have
to work harder to merge the two accessors.

Note that there is a pending update to the architecture to address this
issue by making the affected bits UNKNOWN when CNTKCTL_EL1 is used from
EL2 with VHE enabled.

Fixes: c605ee245097 ("KVM: arm64: timers: Allow physical offset without CNTPOFF_EL2")
Signed-off-by: Marc Zyngier
Cc: stable@vger.kernel.org # v6.4
Reviewed-by: Eric Auger
Link: https://lore.kernel.org/r/20230627140557.544885-1-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/arch_timer.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 0696732fa38c..6dcdae4d38cb 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -827,8 +827,8 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
 	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
 	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

-	/* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
-	sysreg_clear_set(cntkctl_el1, clr, set);
+	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
+	sysreg_clear_set(cnthctl_el2, clr, set);
 }

 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -1563,7 +1563,7 @@ no_vgic:
 void kvm_timer_init_vhe(void)
 {
 	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
-		sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
+		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
 }

 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
-- cgit v1.2.3

From fa729bc7c9c8c17a2481358c841ef8ca920485d3 Mon Sep 17 00:00:00 2001
From: Sudeep Holla
Date: Tue, 4 Jul 2023 20:32:43 +0100
Subject: KVM: arm64: Handle kvm_arm_init failure correctly in finalize_pkvm

Currently there is no synchronisation between finalize_pkvm() and
kvm_arm_init() initcalls.
finalize_pkvm() proceeds happily even if kvm_arm_init() fails, resulting
in the following warning on all the CPUs and eventually a HYP panic:

| kvm [1]: IPA Size Limit: 48 bits
| kvm [1]: Failed to init hyp memory protection
| kvm [1]: error initializing Hyp mode: -22
|
|
|
| WARNING: CPU: 0 PID: 0 at arch/arm64/kvm/pkvm.c:226 _kvm_host_prot_finalize+0x30/0x50
| Modules linked in:
| CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.4.0 #237
| Hardware name: FVP Base RevC (DT)
| pstate: 634020c5 (nZCv daIF +PAN -UAO +TCO +DIT -SSBS BTYPE=--)
| pc : _kvm_host_prot_finalize+0x30/0x50
| lr : __flush_smp_call_function_queue+0xd8/0x230
|
| Call trace:
| _kvm_host_prot_finalize+0x3c/0x50
| on_each_cpu_cond_mask+0x3c/0x6c
| pkvm_drop_host_privileges+0x4c/0x78
| finalize_pkvm+0x3c/0x5c
| do_one_initcall+0xcc/0x240
| do_initcall_level+0x8c/0xac
| do_initcalls+0x54/0x94
| do_basic_setup+0x1c/0x28
| kernel_init_freeable+0x100/0x16c
| kernel_init+0x20/0x1a0
| ret_from_fork+0x10/0x20
| Failed to finalize Hyp protection: -22
| dtb=fvp-base-revc.dtb
| kvm [95]: nVHE hyp BUG at: arch/arm64/kvm/hyp/nvhe/mem_protect.c:540!
| kvm [95]: nVHE call trace:
| kvm [95]: [] __kvm_nvhe_hyp_panic+0xac/0xf8
| kvm [95]: [] __kvm_nvhe_handle_host_mem_abort+0x1a0/0x2ac
| kvm [95]: [] __kvm_nvhe_handle_trap+0x4c/0x160
| kvm [95]: [] __kvm_nvhe___skip_pauth_save+0x4/0x4
| kvm [95]: ---[ end nVHE call trace ]---
| kvm [95]: Hyp Offset: 0xfffe8db00ffa0000
| Kernel panic - not syncing: HYP panic:
| PS:a34023c9 PC:0000f250710b973c ESR:00000000f2000800
| FAR:ffff000800cb00d0 HPFAR:000000000880cb00 PAR:0000000000000000
| VCPU:0000000000000000
| CPU: 3 PID: 95 Comm: kworker/u16:2 Tainted: G W 6.4.0 #237
| Hardware name: FVP Base RevC (DT)
| Workqueue: rpciod rpc_async_schedule
| Call trace:
| dump_backtrace+0xec/0x108
| show_stack+0x18/0x2c
| dump_stack_lvl+0x50/0x68
| dump_stack+0x18/0x24
| panic+0x138/0x33c
| nvhe_hyp_panic_handler+0x100/0x184
| new_slab+0x23c/0x54c
| ___slab_alloc+0x3e4/0x770
| kmem_cache_alloc_node+0x1f0/0x278
| __alloc_skb+0xdc/0x294
| tcp_stream_alloc_skb+0x2c/0xf0
| tcp_sendmsg_locked+0x3d0/0xda4
| tcp_sendmsg+0x38/0x5c
| inet_sendmsg+0x44/0x60
| sock_sendmsg+0x1c/0x34
| xprt_sock_sendmsg+0xdc/0x274
| xs_tcp_send_request+0x1ac/0x28c
| xprt_transmit+0xcc/0x300
| call_transmit+0x78/0x90
| __rpc_execute+0x114/0x3d8
| rpc_async_schedule+0x28/0x48
| process_one_work+0x1d8/0x314
| worker_thread+0x248/0x474
| kthread+0xfc/0x184
| ret_from_fork+0x10/0x20
| SMP: stopping secondary CPUs
| Kernel Offset: 0x57c5cb460000 from 0xffff800080000000
| PHYS_OFFSET: 0x80000000
| CPU features: 0x00000000,1035b7a3,ccfe773f
| Memory Limit: none
| ---[ end Kernel panic - not syncing: HYP panic:
| PS:a34023c9 PC:0000f250710b973c ESR:00000000f2000800
| FAR:ffff000800cb00d0 HPFAR:000000000880cb00 PAR:0000000000000000
| VCPU:0000000000000000 ]---

Fix it by checking for the successful initialisation of kvm_arm_init()
in finalize_pkvm() before proceeding any further.
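In short, initcall ordering only guarantees that finalize_pkvm() runs
after kvm_arm_init(); it says nothing about whether kvm_arm_init()
succeeded. A condensed sketch of the resulting pattern, pieced together
from the hunks below (assuming the usual upstream registrations via
module_init()/device_initcall_sync(); not a literal quote of the sources):

    module_init(kvm_arm_init);              /* device_initcall level */
    device_initcall_sync(finalize_pkvm);    /* runs later, even if the above failed */

    static int __init finalize_pkvm(void)
    {
            /* Bail out unless kvm_arm_init() made it all the way through */
            if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
                    return 0;
            /* ... pkvm_drop_host_privileges() etc. ... */
            return 0;
    }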
Fixes: 87727ba2bb05 ("KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege")
Cc: Will Deacon
Cc: Marc Zyngier
Cc: Oliver Upton
Cc: James Morse
Cc: Suzuki K Poulose
Cc: Zenghui Yu
Signed-off-by: Sudeep Holla
Acked-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230704193243.3300506-1-sudeep.holla@arm.com
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/virt.h | 1 +
 arch/arm64/kvm/arm.c          | 9 ++++++++-
 arch/arm64/kvm/pkvm.c         | 2 +-
 3 files changed, 10 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 5227db7640c8..261d6e9df2e1 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -78,6 +78,7 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);
+bool is_kvm_arm_initialised(void);

 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c2c14059f6a8..0a564d2f30ad 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -53,11 +53,16 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

-static bool vgic_present;
+static bool vgic_present, kvm_arm_initialised;

 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

+bool is_kvm_arm_initialised(void)
+{
+	return kvm_arm_initialised;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -2482,6 +2487,8 @@ static __init int kvm_arm_init(void)
 	if (err)
 		goto out_subs;

+	kvm_arm_initialised = true;
+
 	return 0;

 out_subs:
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 994a494703c3..6ff3ec18c925 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -244,7 +244,7 @@ static int __init finalize_pkvm(void)
 {
 	int ret;

-	if (!is_protected_kvm_enabled())
+	if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
 		return 0;

 	/*
-- cgit v1.2.3

From 970dee09b230895fe2230d2b32ad05a2826818c6 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 3 Jul 2023 17:35:48 +0100
Subject: KVM: arm64: Disable preemption in kvm_arch_hardware_enable()

Since 0bf50497f03b ("KVM: Drop kvm_count_lock and instead protect
kvm_usage_count with kvm_lock"), hotplugging back a CPU whilst a guest
is running results in a number of ugly splats, as most of this code
expects to run with preemption disabled, which isn't the case anymore.

While the context is preemptible, it isn't migratable, which should be
enough. But we have plenty of preemptible() checks all over the place,
and our per-CPU accessors also disable preemption.

Since this affects released versions, let's do the easy fix first,
disabling preemption in kvm_arch_hardware_enable(). We can always
revisit this with a more invasive fix in the future.
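For context, the splats come from the preemption-safety checks built
into the per-CPU accessors. An illustrative sketch of what each
__this_cpu_*() access amounts to under CONFIG_DEBUG_PREEMPT (not the
actual kernel implementation, which routes through
check_preemption_disabled(); the helper name here is invented):

    static inline int this_cpu_read_sketch(int __percpu *ptr)
    {
            /* roughly the debug check performed on each access */
            WARN_ON_ONCE(preemptible());
            return *raw_cpu_ptr(ptr);
    }

Bracketing the function with preempt_disable()/preempt_enable(), as the
hunk below does, silences these checks without changing the already
migration-safe semantics.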
Fixes: 0bf50497f03b ("KVM: Drop kvm_count_lock and instead protect kvm_usage_count with kvm_lock")
Reported-by: Kristina Martsenko
Tested-by: Kristina Martsenko
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/aeab7562-2d39-e78e-93b1-4711f8cc3fa5@arm.com
Cc: stable@vger.kernel.org # v6.3, v6.4
Link: https://lore.kernel.org/r/20230703163548.1498943-1-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/arm.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0a564d2f30ad..a402ea5511f3 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1872,8 +1872,17 @@ static void _kvm_arch_hardware_enable(void *discard)

 int kvm_arch_hardware_enable(void)
 {
-	int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
+	int was_enabled;

+	/*
+	 * Most calls to this function are made with migration
+	 * disabled, but not with preemption disabled. The former is
+	 * enough to ensure correctness, but most of the helpers
+	 * expect the latter and will throw a tantrum otherwise.
+	 */
+	preempt_disable();
+
+	was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
 	_kvm_arch_hardware_enable(NULL);

 	if (!was_enabled) {
@@ -1881,6 +1890,8 @@ int kvm_arch_hardware_enable(void)
 		kvm_timer_cpu_up();
 	}

+	preempt_enable();
+
 	return 0;
 }

-- cgit v1.2.3

From df6556adf27b7372cfcd97e1c0afb0d516c8279f Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Tue, 27 Jun 2023 23:54:05 +0000
Subject: KVM: arm64: Correctly handle page aging notifiers for unaligned memslot

Userspace is allowed to select any PAGE_SIZE aligned hva to back guest
memory. This is even the case with hugepages, although it is a rather
suboptimal configuration as PTE level mappings are used at stage-2.

The arm64 page aging handlers have an assumption that the specified
range is exactly one page/block of memory, which in the aforementioned
case is not necessarily true. Altogether, this leads to the WARN() in
kvm_age_gfn() firing.

However, the WARN is only part of the issue, as the table walkers visit
at most a single leaf PTE. For hugepage-backed memory in a memslot that
isn't hugepage-aligned, page aging entirely misses accesses to the
hugepage beyond the first page in the memslot.

Add a new walker dedicated to handling page aging MMU notifiers capable
of walking a range of PTEs. Convert kvm(_test)_age_gfn() over to the new
walker and drop the WARN that caught the issue in the first place. The
implementation of this walker was inspired by the test_clear_young()
implementation by Yu Zhao [*], but repurposed to address a bug in the
existing aging implementation.
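For a concrete picture (numbers invented for illustration): if the
memslot's hva begins 4KiB into a 2MiB THP, stage-2 falls back to 4KiB
mappings, yet a ->clear_flush_young() notification can still span the
whole backing hugepage, i.e. a range of 512 gfns. The old helper visited
only the leaf PTE translating range->start:

    /* pre-patch: one PTE per notification, whatever the range size */
    kpte = kvm_pgtable_stage2_mkold(pgt, range->start << PAGE_SHIFT);

so the access flag of the remaining 511 PTEs was never tested or
cleared, and those pages always appeared cold — in addition to tripping
the size WARN.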
Cc: stable@vger.kernel.org # v5.15
Fixes: 056aad67f836 ("kvm: arm/arm64: Rework gpa callback handlers")
Link: https://lore.kernel.org/kvmarm/20230526234435.662652-6-yuzhao@google.com/
Co-developed-by: Yu Zhao
Signed-off-by: Yu Zhao
Reported-by: Reiji Watanabe
Reviewed-by: Marc Zyngier
Reviewed-by: Shaoqin Huang
Link: https://lore.kernel.org/r/20230627235405.4069823-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/kvm_pgtable.h | 26 +++++++-------------
 arch/arm64/kvm/hyp/pgtable.c         | 47 +++++++++++++++++++++++++++++-------
 arch/arm64/kvm/mmu.c                 | 18 ++++++--------
 3 files changed, 55 insertions(+), 36 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 8294a9a7e566..929d355eae0a 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *					   flag in a page-table entry.
  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
+ * @size:	Size of the address range to visit.
+ * @mkold:	True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
  *
  * Note that it is the caller's responsibility to invalidate the TLB after
  * calling this function to ensure that the updated permissions are visible
  * to the CPUs.
  *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
  */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold);

 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -645,18 +649,6 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 				   enum kvm_pgtable_prot prot);

-/**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
 /**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *					of Coherency for guest stage-2 address
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index aa740a974e02..f7a93ef29250 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1195,25 +1195,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
 	return pte;
 }

-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+struct stage2_age_data {
+	bool	mkold;
+	bool	young;
+};
+
+static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+			     enum kvm_pgtable_walk_flags visit)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-				 &pte, NULL, 0);
+	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data *data = ctx->arg;
+
+	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+		return 0;
+
+	data->young = true;
+
+	/*
+	 * stage2_age_walker() is always called while holding the MMU lock for
+	 * write, so this will always succeed. Nonetheless, this deliberately
+	 * follows the race detection pattern of the other stage-2 walkers in
+	 * case the locking mechanics of the MMU notifiers is ever changed.
+	 */
+	if (data->mkold && !stage2_try_set_pte(ctx, new))
+		return -EAGAIN;
+
 	/*
 	 * "But where's the TLBI?!", you scream.
 	 * "Over in the core code", I sigh.
 	 *
 	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
 	 */
-	return pte;
+	return 0;
 }

-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data data = {
+		.mkold		= mkold,
+	};
+	struct kvm_pgtable_walker walker = {
+		.cb		= stage2_age_walker,
+		.arg		= &data,
+		.flags		= KVM_PGTABLE_WALK_LEAF,
+	};
+
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+	return data.young;
 }

 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6db9ef288ec3..d3b4feed460c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1756,27 +1756,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	u64 size = (range->end - range->start) << PAGE_SHIFT;
-	kvm_pte_t kpte;
-	pte_t pte;

 	if (!kvm->arch.mmu.pgt)
 		return false;

-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-					range->start << PAGE_SHIFT);
-	pte = __pte(kpte);
-	return pte_valid(pte) && pte_young(pte);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, true);
 }

 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
+	u64 size = (range->end - range->start) << PAGE_SHIFT;
+
 	if (!kvm->arch.mmu.pgt)
 		return false;

-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-					   range->start << PAGE_SHIFT);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, false);
 }

 phys_addr_t kvm_mmu_get_httbr(void)
-- cgit v1.2.3

From dcf89d111199562fa5f31a1bb76f17bc4831f6da Mon Sep 17 00:00:00 2001
From: Mostafa Saleh
Date: Thu, 6 Jul 2023 15:22:40 +0000
Subject: KVM: arm64: Add missing BTI instructions
Some bti instructions were missing from commit b53d4a272349
("KVM: arm64: Use BTI for nvhe").

1) kvm_host_psci_cpu_entry

kvm_host_psci_cpu_entry is called from __kvm_hyp_init_cpu through a "br"
instruction, as __kvm_hyp_init_cpu resides in the idmap section while
kvm_host_psci_cpu_entry is in hyp .text, so the offset is larger than
the 128MB range covered by "b". This means that the function should
start with a "bti j" instruction.

LLVM, which is the only compiler supporting BTI for Linux, adds "bti j"
for jump tables or when taking the address of a block [1]. The same
behaviour is observed with GCC. As kvm_host_psci_cpu_entry is a C
function, this must be done in assembly.

Another solution is to use X16/X17 with "br": according to ARM ARM
DDI0487I.a RLJHCL/IGMGRS, PACIASP has an implicit branch target
identification instruction that is compatible with PSTATE.BTYPE 0b01,
which includes "br X16/X17", and kvm_host_psci_cpu_entry begins with
PACIASP as it is an external function. However, using an explicit "bti"
is clearer than relying on which register is used.

A third solution is to clear SCTLR_EL2.BT, which would make PACIASP
compatible with PSTATE.BTYPE 0b11 ("br" to other registers). However,
this deviates from the kernel behaviour (in bti_enable()).

2) Spectre vector table

"br" instructions are generated at runtime for the vector table
(__bp_harden_hyp_vecs). These branches would land on vectors in
__kvm_hyp_vector at offset 8. As all the macros are defined with
valid_vect/invalid_vect, it is sufficient to add "bti j" at the correct
offset.

[1] https://reviews.llvm.org/D52867

Fixes: b53d4a272349 ("KVM: arm64: Use BTI for nvhe")
Signed-off-by: Mostafa Saleh
Reported-by: Sudeep Holla
Acked-by: Marc Zyngier
Tested-by: Sudeep Holla
Link: https://lore.kernel.org/r/20230706152240.685684-1-smostafa@google.com
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/hyp/hyp-entry.S       |  8 ++++++++
 arch/arm64/kvm/hyp/nvhe/host.S       | 10 ++++++++++
 arch/arm64/kvm/hyp/nvhe/psci-relay.c |  2 +-
 3 files changed, 19 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 8f3f93fa119e..03f97d71984c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -154,6 +154,12 @@ SYM_CODE_END(\label)
 	esb
 	stp	x0, x1, [sp, #-16]!
 662:
+	/*
+	 * spectre vectors __bp_harden_hyp_vecs generate br instructions at runtime
+	 * that jump at offset 8 at __kvm_hyp_vector.
+	 * As hyp .text is guarded section, it needs bti j.
+	 */
+	bti j
 	b	\target

 check_preamble_length 661b, 662b
@@ -165,6 +171,8 @@ check_preamble_length 661b, 662b
 	nop
 	stp	x0, x1, [sp, #-16]!
 662:
+	/* Check valid_vect */
+	bti j
 	b	\target

 check_preamble_length 661b, 662b
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index c87c63133e10..7693a6757cd7 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -297,3 +297,13 @@ SYM_CODE_START(__kvm_hyp_host_forward_smc)
 	ret
 SYM_CODE_END(__kvm_hyp_host_forward_smc)
+
+/*
+ * kvm_host_psci_cpu_entry is called through br instruction, which requires
+ * bti j instruction as compilers (gcc and llvm) doesn't insert bti j for external
+ */ +SYM_CODE_START(kvm_host_psci_cpu_entry) + bti j + b __kvm_host_psci_cpu_entry +SYM_CODE_END(kvm_host_psci_cpu_entry) diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index 08508783ec3d..24543d2a3490 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -200,7 +200,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) __hyp_pa(init_params), 0); } -asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on) +asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) { struct psci_boot_args *boot_args; struct kvm_cpu_context *host_ctxt; -- cgit v1.2.3 From b321c31c9b7b309dcde5e8854b741c8e6a9a05f0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 13 Jul 2023 08:06:57 +0100 Subject: KVM: arm64: vgic-v4: Make the doorbell request robust w.r.t preemption Xiang reports that VMs occasionally fail to boot on GICv4.1 systems when running a preemptible kernel, as it is possible that a vCPU is blocked without requesting a doorbell interrupt. The issue is that any preemption that occurs between vgic_v4_put() and schedule() on the block path will mark the vPE as nonresident and *not* request a doorbell irq. This occurs because when the vcpu thread is resumed on its way to block, vcpu_load() will make the vPE resident again. Once the vcpu actually blocks, we don't request a doorbell anymore, and the vcpu won't be woken up on interrupt delivery. Fix it by tracking that we're entering WFI, and key the doorbell request on that flag. This allows us not to make the vPE resident when going through a preempt/schedule cycle, meaning we don't lose any state. Cc: stable@vger.kernel.org Fixes: 8e01d9a396e6 ("KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put") Reported-by: Xiang Chen Suggested-by: Zenghui Yu Tested-by: Xiang Chen Co-developed-by: Oliver Upton Signed-off-by: Marc Zyngier Acked-by: Zenghui Yu Link: https://lore.kernel.org/r/20230713070657.3873244-1-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/arm.c | 6 ++++-- arch/arm64/kvm/vgic/vgic-v3.c | 2 +- arch/arm64/kvm/vgic/vgic-v4.c | 7 +++++-- 4 files changed, 12 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8b6096753740..d3dd05bbfe23 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -727,6 +727,8 @@ struct kvm_vcpu_arch { #define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5)) /* PMUSERENR for the guest EL0 is on physical CPU */ #define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6)) +/* WFI instruction trapped */ +#define IN_WFI __vcpu_single_flag(sflags, BIT(7)) /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a402ea5511f3..72dc53a75d1c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -718,13 +718,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) */ preempt_disable(); kvm_vgic_vmcr_sync(vcpu); - vgic_v4_put(vcpu, true); + vcpu_set_flag(vcpu, IN_WFI); + vgic_v4_put(vcpu); preempt_enable(); kvm_vcpu_halt(vcpu); vcpu_clear_flag(vcpu, IN_WFIT); preempt_disable(); + vcpu_clear_flag(vcpu, IN_WFI); vgic_v4_load(vcpu); preempt_enable(); } @@ -792,7 +794,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) { /* The distributor enable bits were 
 			preempt_disable();
-			vgic_v4_put(vcpu, false);
+			vgic_v4_put(vcpu);
 			vgic_v4_load(vcpu);
 			preempt_enable();
 		}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index c3b8e132d599..3dfc8b84e03e 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -749,7 +749,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

-	WARN_ON(vgic_v4_put(vcpu, false));
+	WARN_ON(vgic_v4_put(vcpu));

 	vgic_v3_vmcr_sync(vcpu);
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c1c28fe680ba..339a55194b2c 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
 	its_vm->vpes = NULL;
 }

-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
+int vgic_v4_put(struct kvm_vcpu *vcpu)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;

-	return its_make_vpe_non_resident(vpe, need_db);
+	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
 }

 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
 		return 0;

+	if (vcpu_get_flag(vcpu, IN_WFI))
+		return 0;
+
 	/*
 	 * Before making the VPE resident, make sure the redistributor
 	 * corresponding to our current CPU expects us here. See the
-- cgit v1.2.3

From 6d4f9236cd678e0bf0c09fd0e1fa20435bb2e5a2 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Thu, 13 Jul 2023 22:16:49 +0000
Subject: KVM: arm64: Correctly handle RES0 bits PMEVTYPER_EL0.evtCount

The PMU event ID varies from 10 to 16 bits, depending on the PMU
version. If the PMU only supports 10 bits of event ID, bits [15:10] of
the evtCount field behave as RES0. While the actual PMU emulation code
gets this right (i.e. RES0 bits are masked out when programming the perf
event), the sysreg emulation writes an unmasked value to the in-memory
cpu context. The net effect is that guest reads and writes of
PMEVTYPER_EL0 will see non-RES0 behavior in the reserved bits of the
field.

As it so happens, kvm_pmu_set_counter_event_type() already writes a
masked value to the in-memory context that gets overwritten by
access_pmu_evtyper(). Fix the issue by removing the unnecessary (and
incorrect) register write in access_pmu_evtyper().
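To make the RES0 behaviour concrete (values invented for illustration):
with a PMU implementing 10 event ID bits, bits [15:10] of evtCount must
read as zero, so a guest write/read pair went roughly like this before
the fix:

    guest writes  PMEVTYPERn_EL0.evtCount = 0xfc11
    PMU emulation stores (masked)           0x0011
    sysreg code then stored (unmasked)      0xfc11   <- the bug
    guest read returned                     0xfc11   (RES0 bits stuck)

With the redundant store removed, only the masked value written by
kvm_pmu_set_counter_event_type() remains in the vcpu context.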
Reviewed-by: Marc Zyngier
Reviewed-by: Reiji Watanabe
Link: https://lore.kernel.org/r/20230713221649.3889210-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/sys_regs.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index bd3431823ec5..fa7597882ed8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -986,7 +986,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

 	if (p->is_write) {
 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
 		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
-- cgit v1.2.3

From 9d2a55b403eea26cab7c831d8e1c00ef1e6a6850 Mon Sep 17 00:00:00 2001
From: Xiang Chen
Date: Fri, 14 Jul 2023 11:38:40 +0800
Subject: KVM: arm64: Fix the name of sys_reg_desc related to PMU

The PMU system registers in sys_reg_descs[] are defined with the macros
PMU_SYS_REG() / PMU_PMEVCNTR_EL0 / PMU_PMEVTYPER_EL0, and the latter two
macros themselves call PMU_SYS_REG(). Currently the input parameter of
PMU_SYS_REG() is another macro which expands to the calculation formula
for the register's encoding, so if, for example, we want
"SYS_PMINTENSET_EL1" as the name of the sys register, the name we
actually get is the following:

(((3) << 19) | ((0) << 16) | ((9) << 12) | ((14) << 8) | ((1) << 5))

The name of the system register is used in some tracepoints such as
trace_kvm_sys_access(); if it is not set correctly, we have to decode
the inaccurate name to recover the exact one (which is also inconsistent
with the other system registers), and the inaccurate name occupies more
space as well.

To fix the issue, pass the name as the input parameter of PMU_SYS_REG(),
like MTE_REG or EL2_REG do.
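The underlying preprocessor rule, shown with a minimal illustrative
snippet (simplified stand-ins, not the kernel macros): a macro argument
is fully expanded before substitution unless the parameter is used with
the '#' or '##' operators, so stringization only preserves the
spelled-out name when no intermediate expansion happens:

    #define SYS_PMINTENSET_EL1  (((3) << 19) | ((0) << 16) | ((9) << 12) | \
                                 ((14) << 8) | ((1) << 5))
    #define DESC(reg)   #reg              /* '#': argument is not pre-expanded */
    #define OLD(r)      DESC(r)           /* plain use: r is pre-expanded */
    #define NEW(name)   DESC(SYS_##name)  /* '##': argument is not pre-expanded */

    const char *before = OLD(SYS_PMINTENSET_EL1); /* "(((3) << 19) | ..." */
    const char *after  = NEW(PMINTENSET_EL1);     /* "SYS_PMINTENSET_EL1" */

This is why passing the bare name into PMU_SYS_REG() and pasting the
SYS_ prefix inside it lets the stringization in SYS_DESC() pick up
"SYS_PMINTENSET_EL1" rather than the encoding formula.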
Signed-off-by: Xiang Chen
Acked-by: Marc Zyngier
Link: https://lore.kernel.org/r/1689305920-170523-1-git-send-email-chenxiang66@hisilicon.com
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/sys_regs.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index fa7597882ed8..2ca2973abe66 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1114,18 +1114,19 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
 	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }

-#define PMU_SYS_REG(r)							\
-	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
+#define PMU_SYS_REG(name)						\
+	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
+	.visibility = pmu_visibility

 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)						\
-	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
+	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
 	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)						\
-	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
+	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
 	  .reset = reset_pmevtyper,					\
 	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

@@ -2114,9 +2115,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
 	/* PMBIDR_EL1 is not trapped */

-	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
+	{ PMU_SYS_REG(PMINTENSET_EL1),
 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
-	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+	{ PMU_SYS_REG(PMINTENCLR_EL1),
 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

@@ -2163,41 +2164,41 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
 	{ SYS_DESC(SYS_SVCR), undef_access },

-	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
 	  .reset = reset_pmcr, .reg = PMCR_EL0 },
-	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+	{ PMU_SYS_REG(PMCNTENSET_EL0),
 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+	{ PMU_SYS_REG(PMCNTENCLR_EL0),
 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+	{ PMU_SYS_REG(PMOVSCLR_EL0),
 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
 	/*
 	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
 	 * previously (and pointlessly) advertised in the past...
 	 */
-	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
+	{ PMU_SYS_REG(PMSWINC_EL0),
 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
 	  .access = access_pmswinc, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMSELR_EL0), .access = access_pmselr,
+	{ PMU_SYS_REG(PMSELR_EL0), .access = access_pmselr,
 	  .reset = reset_pmselr, .reg = PMSELR_EL0 },
-	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
+	{ PMU_SYS_REG(PMCEID0_EL0),
 	  .access = access_pmceid, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+	{ PMU_SYS_REG(PMCEID1_EL0),
 	  .access = access_pmceid, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+	{ PMU_SYS_REG(PMCCNTR_EL0),
 	  .access = access_pmu_evcntr, .reset = reset_unknown,
 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
-	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+	{ PMU_SYS_REG(PMXEVTYPER_EL0),
 	  .access = access_pmu_evtyper, .reset = NULL },
-	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+	{ PMU_SYS_REG(PMXEVCNTR_EL0),
 	  .access = access_pmu_evcntr, .reset = NULL },
 	/*
 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
-	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
+	{ PMU_SYS_REG(PMOVSSET_EL0),
 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
@@ -2353,7 +2354,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

 	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
-- cgit v1.2.3