path: root/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
author     Bipin Ravi <bipin.ravi@arm.com>      2022-02-23 23:45:50 -0600
committer  John Powell <john.powell@arm.com>    2022-03-18 01:01:34 +0200
commit     9b2510b69de26cc7f571731b415f6dec82669b6c (patch)
tree       7e390dfaf479fdefd0432eb654d6fd2098acefe8 /lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
parent     be9121fd311ff48c94f3d90fe7efcf84586119e4 (diff)
fix(security): apply SMCCC_ARCH_WORKAROUND_3 to A73/A75/A72/A57
This patch applies CVE-2022-23960 workarounds for Cortex-A75, Cortex-A73, Cortex-A72 & Cortex-A57. It also implements the new SMCCC_ARCH_WORKAROUND_3 and enables the necessary discovery hooks for Cortex-A72, Cortex-A57, Cortex-A73 and Cortex-A75 so that this SMC can be discovered via SMCCC_ARCH_FEATURES.

SMCCC_ARCH_WORKAROUND_3 is implemented for A57/A72 because some revisions are affected by both CVE-2022-23960 and CVE-2017-5715, and this allows callers to replace SMCCC_ARCH_WORKAROUND_1 calls with SMCCC_ARCH_WORKAROUND_3. For details of SMCCC_ARCH_WORKAROUND_3, please refer to the SMCCC v1.4 specification.

Signed-off-by: Bipin Ravi <bipin.ravi@arm.com>
Signed-off-by: John Powell <john.powell@arm.com>
Change-Id: Ifa6d9c7baa6764924638efe3c70468f98d60ed7c
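As a companion to the discovery hooks mentioned above, the following is a minimal caller-side sketch of how a kernel or hypervisor might probe for SMCCC_ARCH_WORKAROUND_3 through SMCCC_ARCH_FEATURES and prefer it over SMCCC_ARCH_WORKAROUND_1. It assumes SMC is the conduit; the function IDs come from the SMCCC specification, while smccc_smc() and select_spectre_workaround() are hypothetical helpers, not part of this patch.

    #include <stdint.h>

    #define SMCCC_ARCH_FEATURES         0x80000001U
    #define SMCCC_ARCH_WORKAROUND_1     0x80008000U
    #define SMCCC_ARCH_WORKAROUND_3     0x80003FFFU

    /* Hypothetical SMC conduit: w0 = function ID, w1 = argument, result in x0. */
    static int64_t smccc_smc(uint32_t fid, uint32_t arg)
    {
        register uint64_t x0 __asm__("x0") = fid;
        register uint64_t x1 __asm__("x1") = arg;

        /* SMCCC fast calls may clobber x0-x3. */
        __asm__ volatile("smc #0"
                         : "+r"(x0), "+r"(x1)
                         :
                         : "x2", "x3", "memory");
        return (int64_t)x0;
    }

    /*
     * Prefer SMCCC_ARCH_WORKAROUND_3 when the firmware advertises it
     * (SMCCC_ARCH_FEATURES returns a non-negative value), otherwise fall
     * back to SMCCC_ARCH_WORKAROUND_1.  The precise return-value meaning
     * (workaround required vs. not needed on this CPU) is defined by the
     * SMCCC specification.
     */
    static uint32_t select_spectre_workaround(void)
    {
        if (smccc_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_3) >= 0)
            return SMCCC_ARCH_WORKAROUND_3;
        return SMCCC_ARCH_WORKAROUND_1;
    }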
Diffstat (limited to 'lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S')
-rw-r--r--  lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index c9a954496..022281876 100644
--- a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -308,22 +308,25 @@ vector_entry bpiall_ret_sync_exception_aarch32
/*
* Check if SMC is coming from A64 state on #0
- * with W0 = SMCCC_ARCH_WORKAROUND_1
+ * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
*
* This sequence evaluates as:
- * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+ * (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+ * (ESR_EL3==SMC#0) : (NE)
* allowing use of a single branch operation
*/
orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
cmp w0, w2
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_3
+ ccmp w0, w2, #4, ne
mov_imm w2, ESR_EL3_A64_SMC0
ccmp w3, w2, #0, eq
/* Static predictor will predict a fall through */
bne 1f
eret
1:
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- b sync_exception_aarch64
+ /* restore x2 and x3 and continue sync exception handling */
+ b bpiall_ret_sync_exception_aarch32_tail
end_vector_entry bpiall_ret_sync_exception_aarch32
vector_entry bpiall_ret_irq_aarch32
@@ -355,3 +358,11 @@ end_vector_entry bpiall_ret_fiq_aarch32
vector_entry bpiall_ret_serror_aarch32
b report_unhandled_exception
end_vector_entry bpiall_ret_serror_aarch32
+
+ /*
+ * Part of bpiall_ret_sync_exception_aarch32 to save vector space
+ */
+func bpiall_ret_sync_exception_aarch32_tail
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b sync_exception_aarch64
+endfunc bpiall_ret_sync_exception_aarch32_tail
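
For readers less familiar with the orr/cmp/ccmp idiom used in the hunk above, here is an illustrative C rendering of the check it performs before taking the fast-path eret. The parameter names are hypothetical; in the handler, w0 carries the incoming SMC function ID and w3 the saved ESR_EL3 value, with ESR_EL3_A64_SMC0 being the ESR_EL3 encoding of an SMC #0 taken from AArch64 state.

    #include <stdbool.h>
    #include <stdint.h>

    #define SMCCC_ARCH_WORKAROUND_1     0x80008000U
    #define SMCCC_ARCH_WORKAROUND_3     0x80003FFFU

    /*
     * Illustrative equivalent of the orr/cmp/ccmp/bne sequence: the ESR_EL3
     * comparison only takes effect when one of the two function IDs matched;
     * otherwise the second ccmp forces a "not equal" result and the handler
     * falls through to the normal sync exception path.
     */
    static bool take_workaround_fastpath(uint32_t smc_fid, uint32_t esr_el3,
                                         uint32_t esr_el3_a64_smc0)
    {
        bool id_matches = (smc_fid == SMCCC_ARCH_WORKAROUND_1) ||
                          (smc_fid == SMCCC_ARCH_WORKAROUND_3);

        return id_matches && (esr_el3 == esr_el3_a64_smc0);
    }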