author     Vincent Guittot <vincent.guittot@linaro.org>  2023-11-28 15:53:14 +0100
committer  Vincent Guittot <vincent.guittot@linaro.org>  2023-11-30 08:52:20 +0100
commit     0c6d2b491fbdb0998f1f79ec05383a9234007a6c
tree       37ee711f764675b118f70b8fdaa90d317319f300
parent     da7d6f217f3fb6c8634f433cf008c2fe03994108
Add standalone scmi partition
Add an SCMI server partition based on the cactus one. The scmi partition is compiled only against the header and C files in the spm/scmi directory, to help track dependencies and to ease moving the code into SCP-firmware.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
-rw-r--r-- spm/scmi/aarch64/asm_macros.S 226
-rw-r--r-- spm/scmi/aarch64/assert_macros.S 30
-rw-r--r-- spm/scmi/aarch64/cactus_entrypoint.S 107
-rw-r--r-- spm/scmi/aarch64/cactus_exceptions.S 148
-rw-r--r-- spm/scmi/aarch64/drivers/pl011/pl011_console.S 245
-rw-r--r-- spm/scmi/aarch64/ffa_arch_helpers.S 62
-rw-r--r-- spm/scmi/aarch64/framework/asm_debug.S 119
-rw-r--r-- spm/scmi/aarch64/framework/exception_report.c 60
-rw-r--r-- spm/scmi/aarch64/lib/cache_helpers.S 209
-rw-r--r-- spm/scmi/aarch64/lib/exceptions/sync.c 45
-rw-r--r-- spm/scmi/aarch64/lib/locks/spinlock.S 33
-rw-r--r-- spm/scmi/aarch64/lib/misc_helpers.S 225
-rw-r--r-- spm/scmi/aarch64/lib/smc/asm_smc.S 75
-rw-r--r-- spm/scmi/aarch64/lib/smc/hvc.c 30
-rw-r--r-- spm/scmi/aarch64/lib/smc/smc.c 29
-rw-r--r-- spm/scmi/aarch64/lib/xlat_tables_v2/enable_mmu.S 97
-rw-r--r-- spm/scmi/aarch64/lib/xlat_tables_v2/xlat_tables_arch.c 295
-rw-r--r-- spm/scmi/cactus.h 29
-rw-r--r-- spm/scmi/cactus.ld.S 79
-rw-r--r-- spm/scmi/cactus.mk 133
-rw-r--r-- spm/scmi/cactus_interrupt.c 226
-rw-r--r-- spm/scmi/cactus_main.c 493
-rw-r--r-- spm/scmi/ffa_helpers.c 703
-rw-r--r-- spm/scmi/include/aarch64/arch.h 1381
-rw-r--r-- spm/scmi/include/cactus_test_cmds.h 642
-rw-r--r-- spm/scmi/include/ext/common/aarch64/asm_macros.S 226
-rw-r--r-- spm/scmi/include/ext/common/aarch64/assert_macros.S 30
-rw-r--r-- spm/scmi/include/ext/common/asm_macros_common.S 72
-rw-r--r-- spm/scmi/include/ext/common/debug.h 94
-rw-r--r-- spm/scmi/include/ext/common/param_header.h 55
-rw-r--r-- spm/scmi/include/ext/common/test_helpers.h 387
-rw-r--r-- spm/scmi/include/ext/drivers/arm/arm_gic.h 153
-rw-r--r-- spm/scmi/include/ext/drivers/arm/gic_common.h 113
-rw-r--r-- spm/scmi/include/ext/drivers/arm/gic_v2.h 341
-rw-r--r-- spm/scmi/include/ext/drivers/arm/gic_v3.h 234
-rw-r--r-- spm/scmi/include/ext/drivers/arm/pl011.h 87
-rw-r--r-- spm/scmi/include/ext/drivers/arm/private_timer.h 15
-rw-r--r-- spm/scmi/include/ext/drivers/arm/sp804.h 58
-rw-r--r-- spm/scmi/include/ext/drivers/arm/sp805.h 63
-rw-r--r-- spm/scmi/include/ext/drivers/arm/system_timer.h 39
-rw-r--r-- spm/scmi/include/ext/drivers/console.h 56
-rw-r--r-- spm/scmi/include/ext/extensions/amu.h 80
-rw-r--r-- spm/scmi/include/ext/extensions/amu_private.h 23
-rw-r--r-- spm/scmi/include/ext/extensions/fpu.h 54
-rw-r--r-- spm/scmi/include/ext/extensions/pauth.h 40
-rw-r--r-- spm/scmi/include/ext/extensions/sme.h 42
-rw-r--r-- spm/scmi/include/ext/extensions/sve.h 74
-rw-r--r-- spm/scmi/include/ext/lib/aarch64/arch.h 1381
-rw-r--r-- spm/scmi/include/ext/lib/aarch64/arch_features.h 409
-rw-r--r-- spm/scmi/include/ext/lib/aarch64/arch_helpers.h 676
-rw-r--r-- spm/scmi/include/ext/lib/aarch64/serror.h 14
-rw-r--r-- spm/scmi/include/ext/lib/aarch64/sync.h 14
-rw-r--r-- spm/scmi/include/ext/lib/cassert.h 18
-rw-r--r-- spm/scmi/include/ext/lib/events.h 82
-rw-r--r-- spm/scmi/include/ext/lib/extensions/amu.h 80
-rw-r--r-- spm/scmi/include/ext/lib/extensions/amu_private.h 23
-rw-r--r-- spm/scmi/include/ext/lib/extensions/fpu.h 54
-rw-r--r-- spm/scmi/include/ext/lib/extensions/pauth.h 40
-rw-r--r-- spm/scmi/include/ext/lib/extensions/sme.h 42
-rw-r--r-- spm/scmi/include/ext/lib/extensions/sve.h 74
-rw-r--r-- spm/scmi/include/ext/lib/heap/page_alloc.h 39
-rw-r--r-- spm/scmi/include/ext/lib/io_storage.h 106
-rw-r--r-- spm/scmi/include/ext/lib/irq.h 90
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch32/endian_.h 146
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch32/limits_.h 26
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch32/stddef_.h 15
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch32/stdio_.h 15
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch64/endian_.h 128
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch64/limits_.h 26
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch64/setjmp_.h 30
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch64/stddef_.h 15
-rw-r--r-- spm/scmi/include/ext/lib/libc/aarch64/stdio_.h 15
-rw-r--r-- spm/scmi/include/ext/lib/libc/assert.h 23
-rw-r--r-- spm/scmi/include/ext/lib/libc/cdefs.h 33
-rw-r--r-- spm/scmi/include/ext/lib/libc/endian.h 191
-rw-r--r-- spm/scmi/include/ext/lib/libc/errno.h 169
-rw-r--r-- spm/scmi/include/ext/lib/libc/limits.h 19
-rw-r--r-- spm/scmi/include/ext/lib/libc/setjmp.h 20
-rw-r--r-- spm/scmi/include/ext/lib/libc/stdarg.h 20
-rw-r--r-- spm/scmi/include/ext/lib/libc/stdbool.h 17
-rw-r--r-- spm/scmi/include/ext/lib/libc/stddef.h 27
-rw-r--r-- spm/scmi/include/ext/lib/libc/stdint.h 138
-rw-r--r-- spm/scmi/include/ext/lib/libc/stdio.h 31
-rw-r--r-- spm/scmi/include/ext/lib/libc/stdlib.h 30
-rw-r--r-- spm/scmi/include/ext/lib/libc/string.h 30
-rw-r--r-- spm/scmi/include/ext/lib/libc/time.h 18
-rw-r--r-- spm/scmi/include/ext/lib/libc/uuid.h 56
-rw-r--r-- spm/scmi/include/ext/lib/mmio.h 59
-rw-r--r-- spm/scmi/include/ext/lib/power_management.h 191
-rw-r--r-- spm/scmi/include/ext/lib/sgi.h 21
-rw-r--r-- spm/scmi/include/ext/lib/spinlock.h 18
-rw-r--r-- spm/scmi/include/ext/lib/status.h 28
-rw-r--r-- spm/scmi/include/ext/lib/tftf_lib.h 237
-rw-r--r-- spm/scmi/include/ext/lib/timer.h 133
-rw-r--r-- spm/scmi/include/ext/lib/utils/math_utils.h 23
-rw-r--r-- spm/scmi/include/ext/lib/utils/uuid_utils.h 50
-rw-r--r-- spm/scmi/include/ext/lib/utils_def.h 187
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/aarch32/xlat_tables_aarch32.h 72
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/aarch64/xlat_tables_aarch64.h 96
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/xlat_mmu_helpers.h 91
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/xlat_tables_arch.h 31
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/xlat_tables_defs.h 202
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2.h 367
-rw-r--r-- spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2_helpers.h 163
-rw-r--r-- spm/scmi/include/ext/plat/arm/common/plat_arm.h 29
-rw-r--r-- spm/scmi/include/ext/plat/common/common_def.h 35
-rw-r--r-- spm/scmi/include/ext/plat/common/plat_topology.h 192
-rw-r--r-- spm/scmi/include/ffa_endpoints.h 34
-rw-r--r-- spm/scmi/include/ffa_helpers.h 762
-rw-r--r-- spm/scmi/include/ffa_svc.h 184
-rw-r--r-- spm/scmi/include/platform.h 196
-rw-r--r-- spm/scmi/include/smccc.h 80
-rw-r--r-- spm/scmi/include/sp805.h 63
-rw-r--r-- spm/scmi/include/spm_common.h 151
-rw-r--r-- spm/scmi/include/std_svc.h 27
-rw-r--r-- spm/scmi/include/tftf.h 172
-rw-r--r-- spm/scmi/libc/aarch64/setjmp.S 61
-rw-r--r-- spm/scmi/libc/abort.c 15
-rw-r--r-- spm/scmi/libc/assert.c 17
-rw-r--r-- spm/scmi/libc/exit.c 26
-rw-r--r-- spm/scmi/libc/libc.mk 36
-rw-r--r-- spm/scmi/libc/memchr.c 20
-rw-r--r-- spm/scmi/libc/memcmp.c 24
-rw-r--r-- spm/scmi/libc/memcpy.c 18
-rw-r--r-- spm/scmi/libc/memmove.c 31
-rw-r--r-- spm/scmi/libc/memset.c 17
-rw-r--r-- spm/scmi/libc/printf.c 251
-rw-r--r-- spm/scmi/libc/putchar.c 20
-rw-r--r-- spm/scmi/libc/puts.c 24
-rw-r--r-- spm/scmi/libc/rand.c 65
-rw-r--r-- spm/scmi/libc/snprintf.c 249
-rw-r--r-- spm/scmi/libc/strchr.c 53
-rw-r--r-- spm/scmi/libc/strcmp.c 52
-rw-r--r-- spm/scmi/libc/strlcpy.c 52
-rw-r--r-- spm/scmi/libc/strlen.c 17
-rw-r--r-- spm/scmi/libc/strncmp.c 53
-rw-r--r-- spm/scmi/libc/strncpy.c 61
-rw-r--r-- spm/scmi/libc/strnlen.c 46
-rw-r--r-- spm/scmi/libc/strrchr.c 49
-rw-r--r-- spm/scmi/mp_printf.c 24
-rw-r--r-- spm/scmi/plat/arm/fvp/aarch64/plat_helpers.S 91
-rw-r--r-- spm/scmi/plat/arm/fvp/fdts/scmi.dts 57
-rw-r--r-- spm/scmi/plat/arm/fvp/fvp_def.h 61
-rw-r--r-- spm/scmi/plat/arm/fvp/include/platform_def.h 313
-rw-r--r-- spm/scmi/plat/arm/fvp/include/sp_platform_def.h 27
-rw-r--r-- spm/scmi/plat/arm/fvp/platform.mk 15
-rw-r--r-- spm/scmi/sp_debug.c 72
-rw-r--r-- spm/scmi/sp_debug.h 13
-rw-r--r-- spm/scmi/sp_def.h 46
-rw-r--r-- spm/scmi/sp_helpers.h 67
-rw-r--r-- spm/scmi/spm_common.c 96
-rw-r--r-- spm/scmi/spm_helpers.h 26
-rw-r--r-- spm/scmi/xlat_tables_context.c 181
-rw-r--r-- spm/scmi/xlat_tables_core.c 1226
-rw-r--r-- spm/scmi/xlat_tables_private.h 108
-rw-r--r-- spm/scmi/xlat_tables_utils.c 573
156 files changed, 20584 insertions, 0 deletions
diff --git a/spm/scmi/aarch64/asm_macros.S b/spm/scmi/aarch64/asm_macros.S
new file mode 100644
index 0000000..8a69c38
--- /dev/null
+++ b/spm/scmi/aarch64/asm_macros.S
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+
+ .macro func_prologue
+ stp x29, x30, [sp, #-0x10]!
+ mov x29,sp
+ .endm
+
+ .macro func_epilogue
+ ldp x29, x30, [sp], #0x10
+ .endm
+
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 2KB boundary, as required by the ARMv8 architecture.
+ * Use zero as the fill value for the padding bytes, so that the
+ * padding decodes as illegal AArch64 instructions. This increases
+ * security and robustness, and can facilitate debugging.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 11, 0
+ \label:
+ .endm
+
+ /*
+ * Create an entry in the exception vector table, enforcing it is
+ * aligned on a 128-byte boundary, as required by the ARMv8
+ * architecture. Use zero as the fill value for the padding bytes,
+ * so that the padding decodes as illegal AArch64 instructions.
+ * This increases security and robustness, and can facilitate
+ * debugging.
+ */
+ .macro vector_entry label
+ .section .vectors, "ax"
+ .cfi_sections .debug_frame
+ .align 7, 0
+ .type \label, %function
+ .cfi_startproc
+ \label:
+ .endm
+
+ /*
+ * Pad the vector entry with bytes until it reaches its full size of
+ * 32 instructions. If the entry contains more than 32 instructions,
+ * an assembler error is emitted.
+ */
+ .macro end_vector_entry label
+ .cfi_endproc
+ .fill \label + (32 * 4) - .
+ .endm
+
+ /*
+ * Create a vector entry that just spins making the exception unrecoverable.
+ */
+ .macro vector_entry_spin name
+ vector_entry \name
+ b \name
+ end_vector_entry \name
+ .endm
+
+ /*
+ * This macro calculates the base address of an MP stack using the
+ * platform_get_core_pos() index, the name of the stack storage and
+ * the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_mp_stack _name, _size
+ bl platform_get_core_pos
+ ldr x2, =(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
+ /*
+ * This macro calculates the base address of a UP stack using the
+ * name of the stack storage and the size of the stack
+ * Out: X0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ ldr x0, =(\_name + \_size)
+ .endm
+
+ /*
+ * Helper macro to generate the best mov/movk combination according
+ * to the value to be moved. The 16 bits starting at '_shift' are
+ * tested and, if not zero, moved into '_reg' without affecting the
+ * other bits.
+ */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+ /*
+ * Helper macro to load arbitrary values into 32 or 64-bit registers,
+ * generating the best mov/movk combinations. Many base addresses are
+ * 64KB aligned, in which case the macro skips updating bits 15:0.
+ */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
+
+ .macro asm_read_sysreg_el1_or_el2 sysreg
+ mrs x0, CurrentEL
+ cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp x0, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ mrs x0, \sysreg\()_el1
+ b 3f
+2:
+ mrs x0, \sysreg\()_el2
+3:
+ .endm
+
+ .macro asm_write_sysreg_el1_or_el2 sysreg scratch_reg
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ msr \sysreg\()_el1, x0
+ b 3f
+2:
+ msr \sysreg\()_el2, x0
+3:
+ .endm
+
+ .macro asm_read_sctlr_el1_or_el2
+ asm_read_sysreg_el1_or_el2 sctlr
+ .endm
+
+ .macro asm_write_sctlr_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 sctlr \scratch_reg
+ .endm
+
+ .macro asm_write_vbar_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 vbar \scratch_reg
+ .endm
+
+/*
+ * Depending on the current exception level, jump to 'label_el1' or 'label_el2'.
+ * If the current exception level is neither EL1 nor EL2, jump to 'label_error'
+ * instead.
+ * The caller needs to provide the macro with a scratch 64-bit register to use.
+ * Its contents prior to calling this function will be lost.
+ */
+ .macro JUMP_EL1_OR_EL2 scratch_reg, label_el1, label_el2, label_error
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq \label_el1
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq \label_el2
+ b \label_error
+ .endm
+
+ /*
+ * Helper macro to read system register value into x0
+ */
+ .macro read reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x0, \reg
+ ret
+ .endm
+
+ /*
+ * Helper macro to write value from x1 to system register
+ */
+ .macro write reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ msr \reg, x1
+ ret
+ .endm
+
+#endif /* __ASM_MACROS_S__ */
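The mov_imm/_mov_imm16 pair above emits a mov for the first non-zero 16-bit chunk of the constant and a movk for each later one, skipping chunks that are zero. A minimal host-side C sketch of that decomposition, assuming the same chunk test as _mov_imm16 (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Print the mov/movk sequence that mov_imm would emit for 'val'. */
static void mov_imm_sketch(uint64_t val)
{
	if (val == 0) {
		printf("mov  x0, #0\n");
		return;
	}
	int first = 1;
	for (unsigned int shift = 0; shift < 64; shift += 16) {
		unsigned int chunk = (unsigned int)(val >> shift) & 0xffffu;
		if (chunk == 0)
			continue; /* zero chunk: no instruction needed */
		printf("%s x0, #0x%x, lsl #%u\n",
		       first ? "mov " : "movk", chunk, shift);
		first = 0;
	}
}

int main(void)
{
	mov_imm_sketch(0x80000000ULL);     /* one mov, bits 15:0 skipped */
	mov_imm_sketch(0xffff00001234ULL); /* mov + one movk */
	return 0;
}

For a 64KB-aligned base address the shift-0 chunk is zero, so bits 15:0 are never written, which is the optimisation the comment above refers to.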
diff --git a/spm/scmi/aarch64/assert_macros.S b/spm/scmi/aarch64/assert_macros.S
new file mode 100644
index 0000000..b916331
--- /dev/null
+++ b/spm/scmi/aarch64/assert_macros.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+ /*
+ * Assembler macro to enable asm_assert. Use this macro wherever
+ * assert is required in assembly. Please note that the macro makes
+ * use of label '300' to provide the logic and the caller
+ * should make sure that this label is not used to branch prior
+ * to calling this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b._cc 300f ;\
+ adr x0, .L_assert_filename ;\
+ mov x1, __LINE__ ;\
+ b asm_assert ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/spm/scmi/aarch64/cactus_entrypoint.S b/spm/scmi/aarch64/cactus_entrypoint.S
new file mode 100644
index 0000000..17f0798
--- /dev/null
+++ b/spm/scmi/aarch64/cactus_entrypoint.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <sp_def.h>
+
+ .globl cactus_entrypoint
+ .globl secondary_cold_entry
+
+/* Provision one stack per Execution Context (or vCPU) */
+.section .bss.stacks
+ .balign CACHE_WRITEBACK_GRANULE
+ .fill SP_STACKS_SIZE * PLAT_SP_CORE_COUNT
+stacks_end:
+
+func cactus_entrypoint
+ /* Entry reason is primary EC cold boot */
+ mov x19, #1
+
+ /* Fall-through. */
+
+secondary_cold_entry:
+ /*
+ * Entry reason is secondary EC cold boot (or primary EC cold
+ * boot from above).
+ */
+
+ /*
+ * x0 holds a pointer to the Boot Information Blob.
+ * Save it for later usage.
+ */
+ mov x20, x0
+
+ /* The SPMC passes the vCPU id in vMPIDR low bits. */
+ mrs x0, mpidr_el1
+ bic x0, x0, #0x80000000
+
+ /*
+ * To maintain legacy, the SPMC passes the physical core id through x4.
+ * For a MP SP check the physical core id matches the vCPU id.
+ */
+ cmp x4, x0
+ bne .
+
+ /* Setup the stack pointer (from the linear id stored in x0). */
+ adr x1, stacks_end
+ mov x2, #SP_STACKS_SIZE
+ mul x2, x0, x2
+ sub sp, x1, x2
+
+ /*
+ * Invalidate the data cache for the whole partition.
+ * This prevents re-use of stale data cache entries from
+ * prior bootloader stages.
+ */
+ adrp x0, __TEXT_START__
+ adrp x1, __BSS_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* Enable I-Cache */
+ mrs x1, sctlr_el1
+ orr x1, x1, #SCTLR_I_BIT
+ msr sctlr_el1, x1
+ isb
+
+ /*
+ * Set CPACR_EL1.FPEN=11 no EL1/0 trapping of
+ * SVE/Adv. SIMD/FP instructions.
+ */
+ mov x1, CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE)
+ mrs x0, cpacr_el1
+ orr x0, x0, x1
+ msr cpacr_el1, x0
+ isb
+
+ /* Set up exceptions vector table */
+ adrp x1, cactus_vector
+ add x1, x1, :lo12:cactus_vector
+ msr vbar_el1, x1
+ isb
+
+ /* Skip to main if warm boot */
+ cbz x19, 0f
+
+ /* Relocate symbols */
+pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(0x1000 - 1)
+ mov x1, #SP_IMAGE_SIZE
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+
+ /*
+ * Jump to the C entrypoint (it does not return).
+ * Pass the cold boot reason and BIB address.
+ */
+0: mov x0, x19
+ mov x1, x20
+
+ /* And jump to the C entrypoint. */
+ b cactus_main
+endfunc cactus_entrypoint
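The stack setup above computes sp = stacks_end - vcpu_id * SP_STACKS_SIZE, carving one stack per execution context out of a single BSS region. The same arithmetic in C, with illustrative stand-ins for SP_STACKS_SIZE and PLAT_SP_CORE_COUNT (the real values come from sp_def.h and the platform headers):

#include <stdint.h>
#include <stdio.h>

#define SP_STACKS_SIZE     0x1000u /* illustrative, see sp_def.h */
#define PLAT_SP_CORE_COUNT 4u      /* illustrative */

/* One region for all stacks, like the .bss.stacks section. */
static uint8_t stacks[SP_STACKS_SIZE * PLAT_SP_CORE_COUNT];

/* Mirror of the assembly: each vCPU's stack grows down from
 * stacks_end - vcpu_id * SP_STACKS_SIZE. */
static uintptr_t stack_top_for(unsigned int vcpu_id)
{
	uintptr_t stacks_end = (uintptr_t)stacks + sizeof(stacks);
	return stacks_end - (uintptr_t)vcpu_id * SP_STACKS_SIZE;
}

int main(void)
{
	for (unsigned int id = 0; id < PLAT_SP_CORE_COUNT; id++)
		printf("vCPU%u sp = %p\n", id, (void *)stack_top_for(id));
	return 0;
}

vCPU 0 thus gets the topmost stack, and the stacks of higher vCPU ids sit below it, so adjacent contexts never overlap as long as each stays within SP_STACKS_SIZE bytes.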
diff --git a/spm/scmi/aarch64/cactus_exceptions.S b/spm/scmi/aarch64/cactus_exceptions.S
new file mode 100644
index 0000000..06df31c
--- /dev/null
+++ b/spm/scmi/aarch64/cactus_exceptions.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl cactus_vector
+
+/*
+ * Exception vector code for unhandled exceptions.
+ * Print a crash dump on the UART and loops forever.
+ */
+.macro unhandled_exception name
+ vector_entry \name
+ b crash_dump
+ end_vector_entry \name
+.endm
+
+.macro interrupt_vector _type
+ sub sp, sp, #0x100
+ save_gp_regs
+ bl cactus_interrupt_handler_\_type
+ restore_gp_regs
+ add sp, sp, #0x100
+ eret
+.endm
+
+vector_base cactus_vector
+
+ /*
+ * Current EL with SP0 : 0x0 - 0x200.
+ */
+unhandled_exception sync_sp0
+unhandled_exception irq_sp0
+unhandled_exception fiq_sp0
+unhandled_exception serr_sp0
+
+ /*
+ * Current EL with SPx : 0x200 - 0x400.
+ */
+vector_entry sync_spx
+ b sync_exception_vector_entry
+end_vector_entry sync_spx
+
+vector_entry irq_spx
+ b irq_vector_entry
+end_vector_entry irq_spx
+
+vector_entry fiq_spx
+ b fiq_vector_entry
+end_vector_entry fiq_spx
+
+unhandled_exception serr_spx
+
+ /*
+ * Lower EL using AArch64 : 0x400 - 0x600.
+ */
+unhandled_exception sync_a64
+unhandled_exception irq_a64
+unhandled_exception fiq_a64
+unhandled_exception serr_a64
+
+ /*
+ * Lower EL using AArch32 : 0x600 - 0x800.
+ */
+unhandled_exception sync_a32
+unhandled_exception irq_a32
+unhandled_exception fiq_a32
+unhandled_exception serr_a32
+
+.macro save_gp_regs
+ stp x0, x1, [sp, #0x0]
+ stp x2, x3, [sp, #0x10]
+ stp x4, x5, [sp, #0x20]
+ stp x6, x7, [sp, #0x30]
+ stp x8, x9, [sp, #0x40]
+ stp x10, x11, [sp, #0x50]
+ stp x12, x13, [sp, #0x60]
+ stp x14, x15, [sp, #0x70]
+ stp x16, x17, [sp, #0x80]
+ stp x18, x19, [sp, #0x90]
+ stp x20, x21, [sp, #0xa0]
+ stp x22, x23, [sp, #0xb0]
+ stp x24, x25, [sp, #0xc0]
+ stp x26, x27, [sp, #0xd0]
+ stp x28, x29, [sp, #0xe0]
+ /* We push xzr simply to keep the stack 16-byte aligned. */
+ stp x30, xzr, [sp, #0xf0]
+.endm
+
+.macro restore_gp_regs
+ ldp x30, xzr, [sp, #0xf0]
+ ldp x28, x29, [sp, #0xe0]
+ ldp x26, x27, [sp, #0xd0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x18, x19, [sp, #0x90]
+ ldp x16, x17, [sp, #0x80]
+ ldp x14, x15, [sp, #0x70]
+ ldp x12, x13, [sp, #0x60]
+ ldp x10, x11, [sp, #0x50]
+ ldp x8, x9, [sp, #0x40]
+ ldp x6, x7, [sp, #0x30]
+ ldp x4, x5, [sp, #0x20]
+ ldp x2, x3, [sp, #0x10]
+ ldp x0, x1, [sp, #0x0]
+.endm
+
+func sync_exception_vector_entry
+ sub sp, sp, #0x100
+ save_gp_regs
+ mov x19, sp
+ bl tftf_sync_exception_handler
+ cbnz x0, 0f
+ mov x0, x19
+ /* Save original stack pointer value on the stack */
+ add x1, x0, #0x100
+ str x1, [x0, #0xf8]
+ b print_exception
+0: restore_gp_regs
+ add sp, sp, #0x100
+ eret
+endfunc sync_exception_vector_entry
+
+func irq_vector_entry
+ interrupt_vector irq
+endfunc irq_vector_entry
+
+func fiq_vector_entry
+ interrupt_vector fiq
+endfunc fiq_vector_entry
+
+func crash_dump
+ /* Save general-purpose registers on the stack. */
+ sub sp, sp, #0x100
+ save_gp_regs
+
+ /* Save original stack pointer value on the stack. */
+ add x1, sp, #0x100
+ str x1, [sp, #0xf8]
+
+ /* Print the saved CPU context on the UART. */
+ mov x0, sp
+ b print_exception
+endfunc crash_dump
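Every entry in the table above is 32 A64 instructions (128 bytes), and the four entries of each group span 0x200 bytes, which is where the 0x0 - 0x200, 0x200 - 0x400, ... ranges in the comments come from. A small illustrative check of those offsets:

#include <stdio.h>

/* Offset of an entry: group 0 = current EL with SP0, 1 = current EL
 * with SPx, 2 = lower EL AArch64, 3 = lower EL AArch32;
 * type 0 = sync, 1 = irq, 2 = fiq, 3 = serror. */
static unsigned int vector_offset(unsigned int group, unsigned int type)
{
	return (group * 4u + type) * 0x80u; /* 32 instructions * 4 bytes */
}

int main(void)
{
	printf("irq_spx  at 0x%x\n", vector_offset(1, 1)); /* 0x280 */
	printf("sync_a64 at 0x%x\n", vector_offset(2, 0)); /* 0x400 */
	return 0;
}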
diff --git a/spm/scmi/aarch64/drivers/pl011/pl011_console.S b/spm/scmi/aarch64/drivers/pl011/pl011_console.S
new file mode 100644
index 0000000..0d607b9
--- /dev/null
+++ b/spm/scmi/aarch64/drivers/pl011/pl011_console.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+
+ .globl console_init
+ .globl console_pl011_putc
+ .globl console_getc
+ .globl console_try_getc
+ .globl console_flush
+ .globl console_core_init
+ .globl console_core_putc
+ .globl console_core_getc
+ .globl console_core_flush
+
+ /*
+ * The console base is in the data section and not in .bss
+ * even though it is zero-init. In particular, this allows
+ * the console functions to start using this variable before
+ * the runtime memory is initialized for images which do not
+ * need to copy the .data section from ROM to RAM.
+ */
+ .section .data.console_base
+ .align 3
+console_base: .quad 0x0
+
+ /* -----------------------------------------------
+ * int console_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ *
+ * Clobber list : x1 - x3
+ * -----------------------------------------------
+ */
+func console_init
+ adrp x3, console_base
+ str x0, [x3, :lo12:console_base]
+ b console_core_init
+endfunc console_init
+
+ /* -----------------------------------------------
+ * int console_core_init(uintptr_t base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C runtime, so that debug information can be
+ * printed. It is used by console_init and by the
+ * crash reporting code.
+ * In: x0 - Console base address
+ * w1 - Uart clock in Hz
+ * w2 - Baud rate
+ * Out: w0 - Return 1 on success, 0 on error.
+ * Clobber list : x1 - x3
+ * -----------------------------------------------
+ */
+func console_core_init
+ /* Check the input base address */
+ cbz x0, init_fail
+ /* Check baud rate and uart clock for sanity */
+ cbz w1, init_fail
+ cbz w2, init_fail
+ /* Disable uart before programming */
+ ldr w3, [x0, #UARTCR]
+ bic w3, w3, #PL011_UARTCR_UARTEN
+ str w3, [x0, #UARTCR]
+ /* Program the baudrate */
+ /* Divisor = (Uart clock * 4) / baudrate */
+ lsl w1, w1, #2
+ udiv w2, w1, w2
+ /* IBRD = Divisor >> 6 */
+ lsr w1, w2, #6
+ /* Write the IBRD */
+ str w1, [x0, #UARTIBRD]
+ /* FBRD = Divisor & 0x3F */
+ and w1, w2, #0x3f
+ /* Write the FBRD */
+ str w1, [x0, #UARTFBRD]
+ mov w1, #PL011_LINE_CONTROL
+ str w1, [x0, #UARTLCR_H]
+ /* Clear any pending errors */
+ str wzr, [x0, #UARTECR]
+ /* Enable tx, rx, and uart overall */
+ mov w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
+ str w1, [x0, #UARTCR]
+ mov w0, #1
+ ret
+init_fail:
+ mov w0, wzr
+ ret
+endfunc console_core_init
+
+ /* -------------------------------------------------
+ * To allow alternate implementation of putc, pl011
+ * is appended in the function name.
+ *
+ * int console_pl011_putc(int c)
+ *
+ * Clobber list : x1, x2
+ * -------------------------------------------------
+ */
+func console_pl011_putc
+ adrp x1, console_base
+ ldr x1, [x1, :lo12:console_base]
+ b console_core_putc
+endfunc console_pl011_putc
+
+ /* ---------------------------------------------
+ * int console_core_putc(int c, uintptr_t base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or an error
+ * code.
+ * In : w0 - Character to be printed
+ * x1 - Console base address
+ * Out : w0 - Input character or error code.
+ * Clobber list : x2
+ * ---------------------------------------------
+ */
+func console_core_putc
+ /* Check the input parameter */
+ cbz x1, putc_error
+ /* Prepend '\r' to '\n' */
+ cmp w0, #0xA
+ b.ne 2f
+1:
+ /* Check if the transmit FIFO is full */
+ ldr w2, [x1, #UARTFR]
+ tbnz w2, #PL011_UARTFR_TXFF_BIT, 1b
+ mov w2, #0xD
+ str w2, [x1, #UARTDR]
+2:
+ /* Check if the transmit FIFO is full */
+ ldr w2, [x1, #UARTFR]
+ tbnz w2, #PL011_UARTFR_TXFF_BIT, 2b
+
+ /* Only write 8 bits */
+ and w0, w0, #0xFF
+ str w0, [x1, #UARTDR]
+ ret
+putc_error:
+ mov w0, #ERROR_NO_VALID_CONSOLE
+ ret
+endfunc console_core_putc
+
+ /* ---------------------------------------------
+ * int console_getc(void)
+ *
+ * Clobber list : x0 - x3
+ * ---------------------------------------------
+ */
+func console_getc
+ adrp x2, console_base
+ ldr x2, [x2, :lo12:console_base]
+ mov x3, x30
+
+ /* Loop until it returns a character or an error. */
+1: mov x0, x2
+ bl console_core_getc
+ cmp w0, #ERROR_NO_PENDING_CHAR
+ b.eq 1b
+
+ ret x3
+endfunc console_getc
+
+ /* ---------------------------------------------
+ * int console_try_getc(void)
+ *
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_try_getc
+ adrp x0, console_base
+ ldr x0, [x0, :lo12:console_base]
+ b console_core_getc
+endfunc console_try_getc
+
+ /* ---------------------------------------------
+ * int console_core_getc(uintptr_t base_addr)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or an error code.
+ * In : x0 - Console base address
+ * Out : w0 - Return character or error code.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_core_getc
+ cbz x0, getc_error
+
+ /* Check if the receive FIFO is empty */
+ ldr w1, [x0, #UARTFR]
+ tbnz w1, #PL011_UARTFR_RXFE_BIT, getc_empty
+
+ /* Read a character from the FIFO */
+ ldr w0, [x0, #UARTDR]
+ /* Mask out error flags */
+ and w0, w0, #0xFF
+ ret
+
+getc_empty:
+ mov w0, #ERROR_NO_PENDING_CHAR
+ ret
+getc_error:
+ mov w0, #ERROR_NO_VALID_CONSOLE
+ ret
+endfunc console_core_getc
+
+ /* ---------------------------------------------
+ * int console_flush(void)
+ *
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_flush
+ adrp x0, console_base
+ ldr x0, [x0, :lo12:console_base]
+ b console_core_flush
+endfunc console_flush
+
+ /* ---------------------------------------------
+ * int console_core_flush(uintptr_t base_addr)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : x0 - Console base address
+ * Out : w0 - Error code or 0.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_core_flush
+ cbz x0, flush_error
+
+1:
+ /* Loop until the transmit FIFO is empty */
+ ldr w1, [x0, #UARTFR]
+ tbnz w1, #PL011_UARTFR_BUSY_BIT, 1b
+
+ mov w0, wzr
+ ret
+flush_error:
+ mov w0, #ERROR_NO_VALID_CONSOLE
+ ret
+endfunc console_core_flush
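console_core_init programs the PL011 fractional baud rate divisor as Divisor = (uart_clk * 4) / baud_rate, then splits it into the integer part (IBRD) and the low six fractional bits (FBRD). The arithmetic in C, with the register writes elided and a sample 24 MHz clock (illustrative values):

#include <stdint.h>
#include <stdio.h>

/* Compute the PL011 IBRD/FBRD values, mirroring console_core_init. */
static void pl011_baud(uint32_t uart_clk, uint32_t baud,
		       uint32_t *ibrd, uint32_t *fbrd)
{
	uint32_t divisor = (uart_clk << 2) / baud; /* (clk * 4) / baud */
	*ibrd = divisor >> 6;                      /* integer part */
	*fbrd = divisor & 0x3fu;                   /* 6 fractional bits */
}

int main(void)
{
	uint32_t i, f;

	pl011_baud(24000000u, 115200u, &i, &f);
	printf("IBRD=%u FBRD=%u\n", i, f); /* IBRD=13 FBRD=1 */
	return 0;
}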
diff --git a/spm/scmi/aarch64/ffa_arch_helpers.S b/spm/scmi/aarch64/ffa_arch_helpers.S
new file mode 100644
index 0000000..b9c9cd9
--- /dev/null
+++ b/spm/scmi/aarch64/ffa_arch_helpers.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .macro service_call _conduit
+ /*
+ * Use a callee-saved register to point to the ffa_value structure
+ * after returning from the conduit.
+ * Although x19 holds only an 8-byte value, 16 bytes are allocated
+ * on the stack to respect the 16-byte stack alignment.
+ */
+ str x19, [sp, #-16]!
+
+ /*
+ * Save the pointer to the ffa_value structure into x19, which is a
+ * callee-saved register.
+ */
+ mov x19, x0
+ /* Load the argument values into the appropriate registers. */
+ ldp x16, x17, [x0, #128]
+ ldp x14, x15, [x0, #112]
+ ldp x12, x13, [x0, #96]
+ ldp x10, x11, [x0, #80]
+ ldp x8, x9, [x0, #64]
+ ldp x6, x7, [x0, #48]
+ ldp x4, x5, [x0, #32]
+ ldp x2, x3, [x0, #16]
+ ldp x0, x1, [x0, #0]
+
+ \_conduit #0
+
+ /*
+ * The return values are stored in x0-x17, put them in the ffa_value
+ * return structure. x19 points to the ffa_value structure.
+ */
+ stp x0, x1, [x19, #0]
+ stp x2, x3, [x19, #16]
+ stp x4, x5, [x19, #32]
+ stp x6, x7, [x19, #48]
+ stp x8, x9, [x19, #64]
+ stp x10, x11, [x19, #80]
+ stp x12, x13, [x19, #96]
+ stp x14, x15, [x19, #112]
+ stp x16, x17, [x19, #128]
+ ldr x19, [sp], #16
+ .endm
+
+.globl ffa_svc
+func ffa_svc
+ service_call svc
+ ret
+endfunc ffa_svc
+
+.globl ffa_smc
+func ffa_smc
+ service_call smc
+ ret
+endfunc ffa_smc
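The ldp/stp offsets #0 to #128 in service_call assume the ffa_value structure is laid out as eighteen consecutive 64-bit fields for x0-x17. A hypothetical layout with that property (the name is illustrative; the real definition lives in the FF-A helper headers):

#include <assert.h>

typedef unsigned long long u_register_t;

/* x0 .. x17 stored back to back: nine 16-byte ldp/stp pairs at byte
 * offsets 0, 16, ..., 128. */
struct ffa_value_sketch {
	u_register_t regs[18];
};

static_assert(sizeof(struct ffa_value_sketch) == 18 * 8,
	      "x16/x17 pair must land at byte offset 128");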
diff --git a/spm/scmi/aarch64/framework/asm_debug.S b/spm/scmi/aarch64/framework/asm_debug.S
new file mode 100644
index 0000000..32c454f
--- /dev/null
+++ b/spm/scmi/aarch64/framework/asm_debug.S
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+#if ENABLE_ASSERTIONS
+
+ .globl asm_assert
+
+/* Since the max decimal input number is 65536 */
+#define MAX_DEC_DIVISOR 10000
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM 0x30
+
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+ /*
+ * This macro is intended to be used to print the
+ * line number in decimal. Used by asm_assert macro.
+ * The max number expected is 65536.
+ * In: x4 = the decimal to print.
+ * Clobber: x30, x0, x1, x2, x5, x6
+ */
+ .macro asm_print_line_dec
+ mov x6, #10 /* Divide by 10 after every loop iteration */
+ mov x5, #MAX_DEC_DIVISOR
+dec_print_loop:
+ udiv x0, x4, x5 /* Get the quotient */
+ msub x4, x0, x5, x4 /* Find the remainder */
+ add x0, x0, #ASCII_OFFSET_NUM /* Convert to ascii */
+ bl plat_crash_console_putc
+ udiv x5, x5, x6 /* Reduce divisor */
+ cbnz x5, dec_print_loop
+ .endm
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * x0 - File name
+ * x1 - Line no
+ * Clobber list : x30, x0, x1, x2, x3, x4, x5, x6.
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+ mov x5, x0
+ mov x6, x1
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+ /* Check if the console is initialized */
+ cbz x0, _assert_loop
+ /* The console is initialized */
+ adr x4, assert_msg1
+ bl asm_print_str
+ mov x4, x5
+ bl asm_print_str
+ adr x4, assert_msg2
+ bl asm_print_str
+ /* Check if the line number is higher than the max permitted */
+ tst x6, #~0xffff
+ b.ne _assert_loop
+ mov x4, x6
+ asm_print_line_dec
+ bl plat_crash_console_flush
+_assert_loop:
+ wfi
+ b _assert_loop
+endfunc asm_assert
+
+/*
+ * This function prints a string from address in x4.
+ * In: x4 = pointer to string.
+ * Clobber: x30, x0, x1, x2, x3
+ */
+func asm_print_str
+ mov x3, x30
+1:
+ ldrb w0, [x4], #0x1
+ cbz x0, 2f
+ bl plat_crash_console_putc
+ b 1b
+2:
+ ret x3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in x4.
+ * In: x4 = the hexadecimal to print.
+ * Clobber: x30, x0 - x3, x5
+ */
+func asm_print_hex
+ mov x3, x30
+ mov x5, #64 /* No of bits to convert to ascii */
+1:
+ sub x5, x5, #4
+ lsrv x0, x4, x5
+ and x0, x0, #0xf
+ cmp x0, #0xA
+ b.lo 2f
+ /* Add 0x27 in addition to ASCII_OFFSET_NUM
+ * to get ascii for characters 'a' - 'f'.
+ */
+ add x0, x0, #0x27
+2:
+ add x0, x0, #ASCII_OFFSET_NUM
+ bl plat_crash_console_putc
+ cbnz x5, 1b
+ ret x3
+endfunc asm_print_hex
+
+#endif /* ENABLE_ASSERTIONS */
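asm_print_line_dec works by dividing a shrinking power-of-ten divisor into the value, printing one quotient digit per iteration, starting at MAX_DEC_DIVISOR. The same loop in C for reference, with putchar standing in for plat_crash_console_putc:

#include <stdio.h>

#define MAX_DEC_DIVISOR  10000 /* max expected input is 65536 */
#define ASCII_OFFSET_NUM 0x30

static void print_line_dec(unsigned int line)
{
	for (unsigned int div = MAX_DEC_DIVISOR; div != 0; div /= 10) {
		unsigned int quot = line / div; /* udiv */
		line -= quot * div;             /* msub: keep the remainder */
		putchar(ASCII_OFFSET_NUM + (int)quot);
	}
}

int main(void)
{
	print_line_dec(1234); /* prints "01234", like the assembly loop */
	putchar('\n');
	return 0;
}

Note the leading zeros: like the assembly, the loop always prints one digit per divisor rather than suppressing them.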
diff --git a/spm/scmi/aarch64/framework/exception_report.c b/spm/scmi/aarch64/framework/exception_report.c
new file mode 100644
index 0000000..0add276
--- /dev/null
+++ b/spm/scmi/aarch64/framework/exception_report.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <platform.h>
+#include <utils_def.h>
+
+/* We save x0-x30. */
+#define GPREGS_CNT 31
+
+/* Set of registers saved by the crash_dump() assembly function. */
+struct cpu_context {
+ u_register_t regs[GPREGS_CNT];
+ u_register_t sp;
+};
+
+/*
+ * Read the EL1 or EL2 version of a register, depending on the current exception
+ * level.
+ */
+#define read_sysreg(_name) \
+ (IS_IN_EL2() ? read_##_name##_el2() : read_##_name##_el1())
+
+void __dead2 print_exception(const struct cpu_context *ctx)
+{
+ u_register_t mpid = read_mpidr_el1();
+
+ /*
+ * The instruction barrier ensures we don't read stale values of system
+ * registers.
+ */
+ isb();
+
+ printf("Unhandled exception on CPU%u.\n", platform_get_core_pos(mpid));
+
+ /* Dump some interesting system registers. */
+ printf("System registers:\n");
+ printf(" MPIDR=0x%lx\n", mpid);
+ printf(" ESR=0x%lx ELR=0x%lx FAR=0x%lx\n", read_sysreg(esr),
+ read_sysreg(elr), read_sysreg(far));
+ printf(" SCTLR=0x%lx SPSR=0x%lx DAIF=0x%lx\n",
+ read_sysreg(sctlr), read_sysreg(spsr), read_daif());
+
+ /* Dump general-purpose registers. */
+ printf("General-purpose registers:\n");
+ for (int i = 0; i < GPREGS_CNT; ++i) {
+ printf(" x%u=0x%lx\n", i, ctx->regs[i]);
+ }
+ printf(" SP=0x%lx\n", ctx->sp);
+
+ while (1)
+ wfi();
+}
diff --git a/spm/scmi/aarch64/lib/cache_helpers.S b/spm/scmi/aarch64/lib/cache_helpers.S
new file mode 100644
index 0000000..de9c8e4
--- /dev/null
+++ b/spm/scmi/aarch64/lib/cache_helpers.S
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl flush_dcache_range
+ .globl clean_dcache_range
+ .globl inv_dcache_range
+ .globl dcsw_op_louis
+ .globl dcsw_op_all
+ .globl dcsw_op_level1
+ .globl dcsw_op_level2
+ .globl dcsw_op_level3
+
+/*
+ * This macro can be used for implementing various data cache operations `op`
+ */
+.macro do_dcache_maintenance_by_mva op
+ /* Exit early if size is zero */
+ cbz x1, exit_loop_\op
+ dcache_line_size x2, x3
+ add x1, x0, x1
+ sub x3, x2, #1
+ bic x0, x0, x3
+loop_\op:
+ dc \op, x0
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo loop_\op
+ dsb sy
+exit_loop_\op:
+ ret
+.endm
+ /* ------------------------------------------
+ * Clean+Invalidate from the base address for
+ * 'size' bytes. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func flush_dcache_range
+ do_dcache_maintenance_by_mva civac
+endfunc flush_dcache_range
+
+ /* ------------------------------------------
+ * Clean from the base address for 'size'
+ * bytes. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func clean_dcache_range
+ do_dcache_maintenance_by_mva cvac
+endfunc clean_dcache_range
+
+ /* ------------------------------------------
+ * Invalidate from the base address for
+ * 'size' bytes. 'x0' = addr, 'x1' = size
+ * ------------------------------------------
+ */
+func inv_dcache_range
+ do_dcache_maintenance_by_mva ivac
+endfunc inv_dcache_range
+
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way to the level specified
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * x3: The last cache level to operate on
+ * x9: clidr_el1
+ * x10: The cache level to begin the operation from
+ * The function then carries out the operation on each data cache, in
+ * sequence, from the level in x10 up to the level in x3.
+ *
+ * The dcsw_op macro sets up the x3 and x9 parameters based on
+ * clidr_el1 cache information before invoking the main function
+ * ---------------------------------------------------------------
+ */
+
+ .macro dcsw_op shift, fw, ls
+ mrs x9, clidr_el1
+ ubfx x3, x9, \shift, \fw
+ lsl x3, x3, \ls
+ mov x10, xzr
+ b do_dcsw_op
+ .endm
+
+func do_dcsw_op
+ cbz x3, exit
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+#if ENABLE_BTI
+ add x14, x14, x0, lsl #2 // inner loop is + "bti j" instruction
+#endif
+ mov x0, x9
+ mov w8, #1
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lo level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr and ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+#if ENABLE_BTI
+ bti j
+#endif
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.hs loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.hs loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.hi loop1
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+endfunc do_dcsw_op
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+
+func dcsw_op_louis
+ dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
+
+
+func dcsw_op_all
+ dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
+
+ /* ---------------------------------------------------------------
+ * Helper macro for data cache operations by set/way for the
+ * level specified
+ * ---------------------------------------------------------------
+ */
+ .macro dcsw_op_level level
+ mrs x9, clidr_el1
+ mov x3, \level
+ sub x10, x3, #2
+ b do_dcsw_op
+ .endm
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 1 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level1
+ dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 2 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level2
+ dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
+
+ /* ---------------------------------------------------------------
+ * Data cache operations by set/way for level 3 cache
+ *
+ * The main function, do_dcsw_op requires:
+ * x0: The operation type (0-2), as defined in arch.h
+ * ---------------------------------------------------------------
+ */
+func dcsw_op_level3
+ dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/spm/scmi/aarch64/lib/exceptions/sync.c b/spm/scmi/aarch64/lib/exceptions/sync.c
new file mode 100644
index 0000000..49b6bd8
--- /dev/null
+++ b/spm/scmi/aarch64/lib/exceptions/sync.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <sync.h>
+
+static exception_handler_t custom_sync_exception_handler;
+
+void register_custom_sync_exception_handler(exception_handler_t handler)
+{
+ custom_sync_exception_handler = handler;
+}
+
+void unregister_custom_sync_exception_handler(void)
+{
+ custom_sync_exception_handler = NULL;
+}
+
+bool tftf_sync_exception_handler(void)
+{
+ uint64_t elr_elx = IS_IN_EL2() ? read_elr_el2() : read_elr_el1();
+ bool resume = false;
+
+ if (custom_sync_exception_handler == NULL) {
+ return false;
+ }
+
+ resume = custom_sync_exception_handler();
+
+ if (resume) {
+ /* Move ELR to next instruction to allow tftf to continue */
+ if (IS_IN_EL2()) {
+ write_elr_el2(elr_elx + 4U);
+ } else {
+ write_elr_el1(elr_elx + 4U);
+ }
+ }
+
+ return resume;
+}
diff --git a/spm/scmi/aarch64/lib/locks/spinlock.S b/spm/scmi/aarch64/lib/locks/spinlock.S
new file mode 100644
index 0000000..7f6d0c6
--- /dev/null
+++ b/spm/scmi/aarch64/lib/locks/spinlock.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl init_spinlock
+ .globl spin_lock
+ .globl spin_unlock
+
+func init_spinlock
+ str wzr, [x0]
+ ret
+endfunc init_spinlock
+
+func spin_lock
+ mov w2, #1
+ sevl
+l1: wfe
+l2: ldaxr w1, [x0]
+ cbnz w1, l1
+ stxr w1, w2, [x0]
+ cbnz w1, l2
+ ret
+endfunc spin_lock
+
+
+func spin_unlock
+ stlr wzr, [x0]
+ ret
+endfunc spin_unlock
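The lock above is the classic load-acquire-exclusive/store-exclusive loop, with sevl/wfe parking the core while the lock is held. A rough C11-atomics equivalent of the semantics (the wfe event stream has no portable C counterpart, so this sketch simply spins):

#include <stdatomic.h>

typedef struct {
	atomic_uint lock;
} spinlock_sketch_t;

static void init_spinlock_sketch(spinlock_sketch_t *l)
{
	atomic_store_explicit(&l->lock, 0u, memory_order_relaxed);
}

static void spin_lock_sketch(spinlock_sketch_t *l)
{
	unsigned int expected;

	do {
		expected = 0u;
		/* acquire on success, like the ldaxr/stxr pair */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected,
			1u, memory_order_acquire, memory_order_relaxed));
}

static void spin_unlock_sketch(spinlock_sketch_t *l)
{
	/* store-release, like stlr */
	atomic_store_explicit(&l->lock, 0u, memory_order_release);
}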
diff --git a/spm/scmi/aarch64/lib/misc_helpers.S b/spm/scmi/aarch64/lib/misc_helpers.S
new file mode 100644
index 0000000..b677721
--- /dev/null
+++ b/spm/scmi/aarch64/lib/misc_helpers.S
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+ .globl smc
+
+ .globl zeromem16
+ .globl memcpy16
+
+ .globl disable_mmu
+ .globl disable_mmu_icache
+
+func smc
+ smc #0
+endfunc smc
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem16
+#if ENABLE_ASSERTIONS
+ tst x0, #0xf
+ ASM_ASSERT(eq)
+#endif
+ add x2, x0, x1
+/* zero 16 bytes at a time */
+z_loop16:
+ sub x3, x2, x0
+ cmp x3, #16
+ b.lt z_loop1
+ stp xzr, xzr, [x0], #16
+ b z_loop16
+/* zero byte per byte */
+z_loop1:
+ cmp x0, x2
+ b.eq z_end
+ strb wzr, [x0], #1
+ b z_loop1
+z_end:
+ ret
+endfunc zeromem16
+
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy16
+#if ENABLE_ASSERTIONS
+ orr x3, x0, x1
+ tst x3, #0xf
+ ASM_ASSERT(eq)
+#endif
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy byte per byte */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end:
+ ret
+endfunc memcpy16
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU at the current exception level (NS-EL1 or EL2)
+ * This is implemented in assembler to ensure that the data cache is cleaned
+ * and invalidated after the MMU is disabled without any intervening cacheable
+ * data accesses
+ * ---------------------------------------------------------------------------
+ */
+func disable_mmu
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ asm_read_sctlr_el1_or_el2
+ bic x0, x0, x1
+ asm_write_sctlr_el1_or_el2 x1
+ isb /* ensure MMU is off */
+ mov x0, #DCCISW /* DCache clean and invalidate */
+ b dcsw_op_all
+endfunc disable_mmu
+
+func disable_mmu_icache
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache
+
+/* Need this label for asm_read/write_sctlr_el1_or_el2 */
+dead:
+ b dead
+
+/* ---------------------------------------------------------------------------
+ * Helper to fixup Global Offset table (GOT) and dynamic relocations
+ * (.rela.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for start and end of the section. For GOT, we
+ * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
+ * __RELA_START__ and __RELA_END__.
+ *
+ * The function takes the limits of the memory to apply fixups to as
+ * arguments (usually the limits of the relocatable BL image).
+ * x0 - the start of the fixup region
+ * x1 - the limit of the fixup region
+ * These addresses have to be page (4KB) aligned.
+ * ---------------------------------------------------------------------------
+ */
+.globl fixup_gdt_reloc
+func fixup_gdt_reloc
+ mov x6, x0
+ mov x7, x1
+
+ /* Test if the limits are 4K aligned */
+#if ENABLE_ASSERTIONS
+ orr x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ ASM_ASSERT(eq)
+#endif
+ /*
+ * Calculate the offset based on return address in x30.
+ * Assume that this function is called within a page at the start of
+ * fixup region.
+ */
+ and x2, x30, #~(PAGE_SIZE - 1)
+ sub x0, x2, x6 /* Diff(S) = Current Address - Compiled Address */
+
+ adrp x1, __GOT_START__
+ add x1, x1, :lo12:__GOT_START__
+ adrp x2, __GOT_END__
+ add x2, x2, :lo12:__GOT_END__
+
+ /*
+ * The GOT is an array of 64-bit addresses which must be fixed up as
+ * new_addr = old_addr + Diff(S).
+ * new_addr is the address the binary is currently executing from,
+ * and old_addr is the address at link time.
+ */
+1:
+ ldr x3, [x1]
+ /* Skip adding offset if address is < lower limit */
+ cmp x3, x6
+ b.lo 2f
+ /* Skip adding offset if address is >= upper limit */
+ cmp x3, x7
+ b.ge 2f
+ add x3, x3, x0
+ str x3, [x1]
+2:
+ add x1, x1, #8
+ cmp x1, x2
+ b.lo 1b
+
+ /* Starting dynamic relocations. Use adrp/adr to get RELA_START and END */
+ adrp x1, __RELA_START__
+ add x1, x1, :lo12:__RELA_START__
+ adrp x2, __RELA_END__
+ add x2, x2, :lo12:__RELA_END__
+ /*
+ * According to ELF-64 specification, the RELA data structure is as
+ * follows:
+ * typedef struct
+ * {
+ * Elf64_Addr r_offset;
+ * Elf64_Xword r_info;
+ * Elf64_Sxword r_addend;
+ * } Elf64_Rela;
+ *
+ * r_offset is address of reference
+ * r_info is symbol index and type of relocation (in this case
+ * 0x403 which corresponds to R_AARCH64_RELATIVE).
+ * r_addend is constant part of expression.
+ *
+ * Size of Elf64_Rela structure is 24 bytes.
+ */
+1:
+ /* Assert that the relocation type is R_AARCH64_RELATIVE */
+#if ENABLE_ASSERTIONS
+ ldr x3, [x1, #8]
+ cmp x3, #0x403
+ ASM_ASSERT(eq)
+#endif
+ ldr x3, [x1] /* r_offset */
+ add x3, x0, x3
+ ldr x4, [x1, #16] /* r_addend */
+
+ /* Skip adding offset if r_addend is < lower limit */
+ cmp x4, x6
+ b.lo 2f
+ /* Skip adding offset if r_addend entry is >= upper limit */
+ cmp x4, x7
+ b.ge 2f
+
+ add x4, x0, x4 /* Diff(S) + r_addend */
+ str x4, [x3]
+
+2: add x1, x1, #24
+ cmp x1, x2
+ b.lo 1b
+
+ ret
+endfunc fixup_gdt_reloc
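The .rela.dyn walk above reads naturally as C: for each 24-byte Elf64_Rela entry of type R_AARCH64_RELATIVE, write Diff(S) + r_addend to the relocated r_offset, skipping addends outside the image limits. A hedged sketch using the structure from the comment (the real walk gets its bounds from the __RELA_START__/__RELA_END__ linker symbols):

#include <stdint.h>

typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Xword;
typedef int64_t  Elf64_Sxword;

typedef struct {
	Elf64_Addr   r_offset;
	Elf64_Xword  r_info;   /* 0x403 == R_AARCH64_RELATIVE */
	Elf64_Sxword r_addend;
} Elf64_Rela;

/* diff = runtime address - link-time address; [lo, hi) = image limits */
static void fixup_rela_sketch(Elf64_Rela *start, Elf64_Rela *end,
			      uint64_t diff, uint64_t lo, uint64_t hi)
{
	for (Elf64_Rela *r = start; r < end; r++) {
		uint64_t addend = (uint64_t)r->r_addend;

		if (addend < lo || addend >= hi)
			continue; /* points outside the image: skip */

		/* r_offset itself must be rebased before dereferencing */
		*(uint64_t *)(uintptr_t)(r->r_offset + diff) = diff + addend;
	}
}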
diff --git a/spm/scmi/aarch64/lib/smc/asm_smc.S b/spm/scmi/aarch64/lib/smc/asm_smc.S
new file mode 100644
index 0000000..b11baa8
--- /dev/null
+++ b/spm/scmi/aarch64/lib/smc/asm_smc.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .section .text, "ax"
+
+ .macro smccc_conduit _conduit
+
+ /*
+ * According to the AAPCS64, x8 is the indirect result location
+ * register. It contains the address of the memory block that the caller
+ * has reserved to hold the result, i.e. the smc_ret_values structure
+ * in our case.
+ * x8 might be clobbered across the SMC call so save it on the stack.
+ * Although x8 contains an 8-byte value, we are allocating 16 bytes on
+ * the stack to respect the 16-byte stack alignment.
+ */
+ str x8, [sp, #-16]!
+
+ /* "Conduit" arguments are already stored in x0-x7 */
+ \_conduit #0
+
+ /* Pop x8 into a caller-saved register */
+ ldr x9, [sp], #16
+
+ /*
+ * Return values are stored in x0-x7, put them in the 'smc_ret_values'
+ * return structure
+ */
+ stp x0, x1, [x9, #0]
+ stp x2, x3, [x9, #16]
+ stp x4, x5, [x9, #32]
+ stp x6, x7, [x9, #48]
+
+ .endm
+
+/* ---------------------------------------------------------------------------
+ * smc_ret_values asm_tftf_smc64(uint32_t fid,
+ * u_register_t arg1,
+ * u_register_t arg2,
+ * u_register_t arg3,
+ * u_register_t arg4,
+ * u_register_t arg5,
+ * u_register_t arg6,
+ * u_register_t arg7);
+ * ---------------------------------------------------------------------------
+ */
+ .globl asm_tftf_smc64
+
+func asm_tftf_smc64
+ smccc_conduit smc
+ ret
+endfunc asm_tftf_smc64
+
+/* ---------------------------------------------------------------------------
+ * hvc_ret_values asm_tftf_hvc64(uint32_t fid,
+ * u_register_t arg1,
+ * u_register_t arg2,
+ * u_register_t arg3,
+ * u_register_t arg4,
+ * u_register_t arg5,
+ * u_register_t arg6,
+ * u_register_t arg7);
+ * ---------------------------------------------------------------------------
+ */
+ .globl asm_tftf_hvc64
+
+func asm_tftf_hvc64
+ smccc_conduit hvc
+ ret
+endfunc asm_tftf_hvc64
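As the smccc_conduit comment says, x8 is the AAPCS64 indirect result location register: because the return structure is larger than what fits in x0/x1, the caller reserves memory for it and passes its address in x8. A structure with the shape the four stp pairs assume (the name is illustrative; the real smc_ret_values is defined in the TFTF headers):

#include <assert.h>

typedef unsigned long long u_register_t;

/* Eight return registers (x0-x7), stored by the stp pairs at byte
 * offsets #0, #16, #32 and #48. */
struct smc_ret_values_sketch {
	u_register_t ret0, ret1, ret2, ret3;
	u_register_t ret4, ret5, ret6, ret7;
};

static_assert(sizeof(struct smc_ret_values_sketch) == 64,
	      "four 16-byte stp pairs");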
diff --git a/spm/scmi/aarch64/lib/smc/hvc.c b/spm/scmi/aarch64/lib/smc/hvc.c
new file mode 100644
index 0000000..c833864
--- /dev/null
+++ b/spm/scmi/aarch64/lib/smc/hvc.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+hvc_ret_values asm_tftf_hvc64(uint32_t fid,
+ u_register_t arg1,
+ u_register_t arg2,
+ u_register_t arg3,
+ u_register_t arg4,
+ u_register_t arg5,
+ u_register_t arg6,
+ u_register_t arg7);
+
+hvc_ret_values tftf_hvc(const hvc_args *args)
+{
+ return asm_tftf_hvc64(args->fid,
+ args->arg1,
+ args->arg2,
+ args->arg3,
+ args->arg4,
+ args->arg5,
+ args->arg6,
+ args->arg7);
+}
diff --git a/spm/scmi/aarch64/lib/smc/smc.c b/spm/scmi/aarch64/lib/smc/smc.c
new file mode 100644
index 0000000..6667ee7
--- /dev/null
+++ b/spm/scmi/aarch64/lib/smc/smc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <tftf.h>
+
+smc_ret_values asm_tftf_smc64(uint32_t fid,
+ u_register_t arg1,
+ u_register_t arg2,
+ u_register_t arg3,
+ u_register_t arg4,
+ u_register_t arg5,
+ u_register_t arg6,
+ u_register_t arg7);
+
+smc_ret_values tftf_smc(const smc_args *args)
+{
+ return asm_tftf_smc64(args->fid,
+ args->arg1,
+ args->arg2,
+ args->arg3,
+ args->arg4,
+ args->arg5,
+ args->arg6,
+ args->arg7);
+}
diff --git a/spm/scmi/aarch64/lib/xlat_tables_v2/enable_mmu.S b/spm/scmi/aarch64/lib/xlat_tables_v2/enable_mmu.S
new file mode 100644
index 0000000..0f0aaa1
--- /dev/null
+++ b/spm/scmi/aarch64/lib/xlat_tables_v2/enable_mmu.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_el1
+ .global enable_mmu_direct_el2
+ .global enable_mmu_direct_el3
+
+ /* Macros to read and write to system register for a given EL. */
+ .macro _msr reg_name, el, gp_reg
+ msr \reg_name\()_el\()\el, \gp_reg
+ .endm
+
+ .macro _mrs gp_reg, reg_name, el
+ mrs \gp_reg, \reg_name\()_el\()\el
+ .endm
+
+ .macro tlbi_invalidate_all el
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .elseif \el == 2
+ TLB_INVALIDATE(alle2)
+ .elseif \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1, 2 or 3"
+ .endif
+ .endm
+
+ /* void enable_mmu_direct_el<x>(unsigned int flags) */
+ .macro define_mmu_enable_func el
+ func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+ _mrs x1, sctlr, \el
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* Invalidate all TLB entries */
+ tlbi_invalidate_all \el
+
+ mov x7, x0
+ adrp x0, mmu_cfg_params
+ add x0, x0, :lo12:mmu_cfg_params
+
+ /* MAIR */
+ ldr x1, [x0, #(MMU_CFG_MAIR << 3)]
+ _msr mair, \el, x1
+
+ /* TCR */
+ ldr x2, [x0, #(MMU_CFG_TCR << 3)]
+ _msr tcr, \el, x2
+
+ /* TTBR */
+ ldr x3, [x0, #(MMU_CFG_TTBR0 << 3)]
+ _msr ttbr0, \el, x3
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ _mrs x4, sctlr, \el
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ _msr sctlr, \el, x4
+ isb
+
+ ret
+ endfunc enable_mmu_direct_\()el\el
+ .endm
+
+ /*
+ * Define MMU-enabling functions for EL1, EL2 and EL3:
+ *
+ * enable_mmu_direct_el1
+ * enable_mmu_direct_el2
+ * enable_mmu_direct_el3
+ */
+ define_mmu_enable_func 1
+ define_mmu_enable_func 2
+ define_mmu_enable_func 3
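The `ldr x1, [x0, #(MMU_CFG_MAIR << 3)]` loads element MMU_CFG_MAIR of a uint64_t array: shifting the enum index left by 3 turns it into a byte offset. The equivalent C, with the parameter indices written out as an illustrative enum (their ordering follows the xlat_tables_v2 headers):

#include <stdint.h>

enum { MMU_CFG_MAIR, MMU_CFG_TCR, MMU_CFG_TTBR0, MMU_CFG_PARAM_MAX };

static uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/* byte offset (idx << 3) == idx * sizeof(uint64_t), i.e. plain array
 * indexing once the types are known */
static uint64_t read_mmu_cfg_param(unsigned int idx)
{
	return mmu_cfg_params[idx];
}

setup_mmu_cfg (in xlat_tables_arch.c below) fills this array before enable_mmu_direct_elX consumes it.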
diff --git a/spm/scmi/aarch64/lib/xlat_tables_v2/xlat_tables_arch.c b/spm/scmi/aarch64/lib/xlat_tables_v2/xlat_tables_arch.c
new file mode 100644
index 0000000..fd21962
--- /dev/null
+++ b/spm/scmi/aarch64/lib/xlat_tables_v2/xlat_tables_arch.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+#include "../../../xlat_tables_private.h"
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+ u_register_t tgranx;
+
+ if (size == PAGE_SIZE_4KB) {
+ tgranx = get_id_aa64mmfr0_el0_tgran4();
+ /* MSB of TGRAN4 field will be '1' for unsupported feature */
+ return ((tgranx >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED) &&
+ (tgranx < 8ULL));
+ } else if (size == PAGE_SIZE_16KB) {
+ tgranx = get_id_aa64mmfr0_el0_tgran16();
+ return (tgranx >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED);
+ } else if (size == PAGE_SIZE_64KB) {
+ tgranx = get_id_aa64mmfr0_el0_tgran64();
+ /* MSB of TGRAN64 field will be '1' for unsupported feature */
+ return ((tgranx >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED) &&
+ (tgranx < 8ULL));
+ } else {
+ return false;
+ }
+}
+
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
+ return PAGE_SIZE_16KB;
+ } else {
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
+ return PAGE_SIZE_4KB;
+ }
+}
+
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
+
+ /* 48 bits address */
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
+
+#if ENABLE_ASSERTIONS
+/*
+ * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
+ * supported in ARMv8.2 onwards.
+ */
+static const unsigned int pa_range_bits_arr[] = {
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+ PARANGE_0101, PARANGE_0110
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+ u_register_t pa_range = get_pa_range();
+
+ /* All other values are reserved */
+ assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+ return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+ uintptr_t ret;
+
+ if (is_armv8_4_ttst_present())
+ ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
+ else
+ ret = MIN_VIRT_ADDR_SPACE_SIZE;
+
+ return ret;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else if (el == 2U) {
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert((xlat_regime == EL2_REGIME) ||
+ (xlat_regime == EL3_REGIME));
+ return UPPER_ATTRS(XN);
+ }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ /*
+	 * This function supports invalidation of TLB entries for the EL3,
+	 * EL2 and EL1&0 translation regimes.
+ *
+ * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+ * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+ */
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ tlbivaae1is(TLBI_ADDR(va));
+ } else if (xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ tlbivae2is(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3U);
+ tlbivae3is(TLBI_ADDR(va));
+ }
+}
+
+void xlat_arch_tlbi_va_sync(void)
+{
+ /*
+ * A TLB maintenance instruction can complete at any time after
+ * it is issued, but is only guaranteed to be complete after the
+ * execution of DSB by the PE that executed the TLB maintenance
+ * instruction. After the TLB invalidate instruction is
+ * complete, no new memory accesses using the invalidated TLB
+ * entries will be observed by any observer of the system
+	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k), paragraph
+ * "Ordering and completion of TLB maintenance instructions".
+ */
+ dsbish();
+
+ /*
+ * The effects of a completed TLB maintenance instruction are
+ * only guaranteed to be visible on the PE that executed the
+ * instruction after the execution of an ISB instruction by the
+ * PE that executed the TLB maintenance instruction.
+ */
+ isb();
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ assert(el > 0U);
+
+ return el;
+}
+
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime)
+{
+ uint64_t mair, ttbr0, tcr;
+ uintptr_t virtual_addr_space_size;
+
+ /* Set attributes in the right indices of the MAIR. */
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+ /*
+ * Limit the input address ranges and memory region sizes translated
+ * using TTBR0 to the given virtual address space size.
+ */
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
+
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
+
+ assert(virtual_addr_space_size >=
+ xlat_get_min_virt_addr_space_size());
+ assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
+ assert(IS_POWER_OF_TWO(virtual_addr_space_size));
+
+ /*
+ * __builtin_ctzll(0) is undefined but here we are guaranteed that
+ * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
+ */
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
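+	/*
+	 * For example, a 4GB (2^32) virtual address space gives
+	 * T0SZ = 64 - 32 = 32.
+	 */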
+
+ tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;
+
+ /*
+ * Set the cacheability and shareability attributes for memory
+ * associated with translation table walks.
+ */
+ if ((flags & XLAT_TABLE_NC) != 0U) {
+ /* Inner & outer non-cacheable non-shareable. */
+ tcr |= TCR_SH_NON_SHAREABLE |
+ TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+ } else {
+ /* Inner & outer WBWA & shareable. */
+ tcr |= TCR_SH_INNER_SHAREABLE |
+ TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+ }
+
+ /*
+ * It is safer to restrict the max physical address accessible by the
+ * hardware as much as possible.
+ */
+ unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
+
+ if (xlat_regime == EL1_EL0_REGIME) {
+ /*
+ * TCR_EL1.EPD1: Disable translation table walk for addresses
+ * that are translated using TTBR1_EL1.
+ */
+ tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+ } else if (xlat_regime == EL2_REGIME) {
+ tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+ }
+
+ /* Set TTBR bits as well */
+ ttbr0 = (uint64_t) base_table;
+
+ if (is_armv8_2_ttcnp_present()) {
+ /* Enable CnP bit so as to share page tables with all PEs. */
+ ttbr0 |= TTBR_CNP_BIT;
+ }
+
+ params[MMU_CFG_MAIR] = mair;
+ params[MMU_CFG_TCR] = tcr;
+ params[MMU_CFG_TTBR0] = ttbr0;
+}
diff --git a/spm/scmi/cactus.h b/spm/scmi/cactus.h
new file mode 100644
index 0000000..c7176c2
--- /dev/null
+++ b/spm/scmi/cactus.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CACTUS_H__
+#define __CACTUS_H__
+
+#include <stdint.h>
+
+/* Linker symbols used to figure out the memory layout of Cactus. */
+extern uintptr_t __TEXT_START__, __TEXT_END__;
+#define CACTUS_TEXT_START ((uintptr_t)&__TEXT_START__)
+#define CACTUS_TEXT_END ((uintptr_t)&__TEXT_END__)
+
+extern uintptr_t __RODATA_START__, __RODATA_END__;
+#define CACTUS_RODATA_START ((uintptr_t)&__RODATA_START__)
+#define CACTUS_RODATA_END ((uintptr_t)&__RODATA_END__)
+
+extern uintptr_t __DATA_START__, __DATA_END__;
+#define CACTUS_DATA_START ((uintptr_t)&__DATA_START__)
+#define CACTUS_DATA_END ((uintptr_t)&__DATA_END__)
+
+extern uintptr_t __BSS_START__, __BSS_END__;
+#define CACTUS_BSS_START ((uintptr_t)&__BSS_START__)
+#define CACTUS_BSS_END ((uintptr_t)&__BSS_END__)
+
+#endif /* __CACTUS_H__ */
diff --git a/spm/scmi/cactus.ld.S b/spm/scmi/cactus.ld.S
new file mode 100644
index 0000000..673780d
--- /dev/null
+++ b/spm/scmi/cactus.ld.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sp_def.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(cactus_entrypoint)
+
+SECTIONS
+{
+ . = SP_IMAGE_BASE;
+
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "TEXT_START address is not aligned to PAGE_SIZE.")
+
+ .text : {
+ __TEXT_START__ = .;
+ *cactus_entrypoint.o(.text*)
+ *(.text*)
+ *(.vectors)
+ . = NEXT(PAGE_SIZE);
+ __TEXT_END__ = .;
+ }
+
+ .rodata : {
+ . = ALIGN(PAGE_SIZE);
+ __RODATA_START__ = .;
+ *(.rodata*)
+
+		/*
+		 * Keep the .got section in the RO region: it is patched prior
+		 * to enabling the MMU, and keeping it read-only afterwards is
+		 * better for security. The GOT is a table of addresses, so
+		 * ensure 8-byte alignment.
+		 */
+ . = ALIGN(8);
+ __GOT_START__ = .;
+ *(.got)
+ __GOT_END__ = .;
+
+ . = NEXT(PAGE_SIZE);
+ __RODATA_END__ = .;
+
+ }
+
+ .data : {
+ . = ALIGN(PAGE_SIZE);
+ __DATA_START__ = .;
+ *(.data*)
+ . = NEXT(PAGE_SIZE);
+ __DATA_END__ = .;
+ }
+
+ /*
+	 * .rela.dyn needs to come after .data for the readelf utility to parse
+ * this section correctly. Ensure 8-byte alignment so that the fields of
+ * RELA data structure are aligned.
+ */
+ . = ALIGN(8);
+ __RELA_START__ = .;
+ .rela.dyn . : {
+ }
+ __RELA_END__ = .;
+
+ .bss (NOLOAD) : {
+ . = ALIGN(PAGE_SIZE);
+ __BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ *(xlat_table*)
+ . = NEXT(PAGE_SIZE);
+ __BSS_END__ = .;
+ }
+}
diff --git a/spm/scmi/cactus.mk b/spm/scmi/cactus.mk
new file mode 100644
index 0000000..be32e7c
--- /dev/null
+++ b/spm/scmi/cactus.mk
@@ -0,0 +1,133 @@
+#
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include branch_protection.mk
+include lib/xlat_tables_v2/xlat_tables.mk
+
+# Include the SCMI platform makefile, if one exists for this platform
+SCMI_PLAT_PATH := $(shell find spm/scmi/plat -wholename '*/${PLAT}')
+ifneq (${SCMI_PLAT_PATH},)
+ include ${SCMI_PLAT_PATH}/platform.mk
+endif
+
+SCMI_DTB := $(BUILD_PLAT)/scmi.dtb
+SECURE_PARTITIONS += scmi
+
+SCMI_INCLUDES := \
+ -Ispm/scmi/include/ext \
+ -Ispm/scmi/include/ext/common \
+ -Ispm/scmi/include/ext/lib \
+ -Ispm/scmi/include/ext/lib/${ARCH} \
+ -Ispm/scmi/include/ext/lib/extensions \
+ -Ispm/scmi/include/ext/lib/xlat_tables \
+ -Ispm/scmi/${ARCH} \
+ -Ispm/scmi/include \
+ -Ispm/scmi/include/${ARCH} \
+ -Ispm/scmi \
+ -Ispm/scmi/plat/arm/fvp/include/ \
+ -Ispm/scmi/include/ext/plat/arm/common/
+
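+# INCLUDE_SCMI and LIB_SCMI are expected to be supplied by the build
+# environment, pointing at the SCMI firmware headers and at the directory
+# containing the prebuilt libscmi-fw-all static library, respectively.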
+SCMI_INCLUDES += -I${INCLUDE_SCMI}
+SCMI_LDFLAGS += -L${LIB_SCMI}
+SCMI_LDFLAGS += -lscmi-fw-all --static
+
+SCMI_SOURCES := \
+ $(addprefix spm/scmi/, \
+ ${ARCH}/cactus_entrypoint.S \
+ ${ARCH}/cactus_exceptions.S \
+ cactus_interrupt.c \
+ cactus_main.c \
+ sp_debug.c \
+ ${ARCH}/framework/asm_debug.S \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ ${ARCH}/framework/exception_report.c \
+ ${ARCH}/drivers/pl011/pl011_console.S \
+ ${ARCH}/lib/cache_helpers.S \
+ ${ARCH}/lib/misc_helpers.S \
+ ${ARCH}/lib/smc/asm_smc.S \
+ ${ARCH}/lib/smc/smc.c \
+ ${ARCH}/lib/smc/hvc.c \
+ ${ARCH}/lib/exceptions/sync.c \
+ ${ARCH}/lib/locks/spinlock.S \
+ plat/arm/fvp/${ARCH}/plat_helpers.S \
+ mp_printf.c \
+ ${ARCH}/lib/xlat_tables_v2/enable_mmu.S \
+ ${ARCH}/lib/xlat_tables_v2/xlat_tables_arch.c \
+ xlat_tables_context.c \
+ xlat_tables_core.c \
+ xlat_tables_utils.c \
+ )
+
+SCMI_SOURCES += $(addprefix spm/scmi/libc/, \
+ abort.c \
+ assert.c \
+ exit.c \
+ memchr.c \
+ memcmp.c \
+ memcpy.c \
+ memmove.c \
+ memset.c \
+ printf.c \
+ putchar.c \
+ puts.c \
+ rand.c \
+ snprintf.c \
+ strchr.c \
+ strcmp.c \
+ strlcpy.c \
+ strlen.c \
+ strncmp.c \
+ strncpy.c \
+ strnlen.c \
+ strrchr.c)
+
+ifeq (${ARCH},aarch64)
+SCMI_SOURCES += $(addprefix spm/scmi/libc/aarch64/, \
+ setjmp.S)
+endif
+
+SCMI_LINKERFILE := spm/scmi/cactus.ld.S
+
+SCMI_DEFINES :=
+
+$(eval $(call add_define,SCMI_DEFINES,ARM_ARCH_MAJOR))
+$(eval $(call add_define,SCMI_DEFINES,ARM_ARCH_MINOR))
+$(eval $(call add_define,SCMI_DEFINES,DEBUG))
+$(eval $(call add_define,SCMI_DEFINES,ENABLE_ASSERTIONS))
+$(eval $(call add_define,SCMI_DEFINES,ENABLE_BTI))
+$(eval $(call add_define,SCMI_DEFINES,ENABLE_PAUTH))
+$(eval $(call add_define,SCMI_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,SCMI_DEFINES,PLAT_${PLAT}))
+$(eval $(call add_define,SCMI_DEFINES,PLAT_XLAT_TABLES_DYNAMIC))
+
+$(SCMI_DTB) : $(BUILD_PLAT)/scmi $(BUILD_PLAT)/scmi/scmi.elf
+$(SCMI_DTB) : $(SCMI_DTS)
+ @echo " DTBGEN $@"
+ ${Q}tools/generate_dtb/generate_dtb.sh \
+ scmi ${SCMI_DTS} $(BUILD_PLAT) $(SCMI_DTB)
+ @echo
+ @echo "Built $@ successfully"
+ @echo
+
+scmi: $(SCMI_DTB)
+
+# FDTS_CP copies flattened device tree sources
+# $(1) = output directory
+# $(2) = flattened device tree source file to copy
+define FDTS_CP
+ $(eval FDTS := $(addprefix $(1)/,$(notdir $(2))))
+$(FDTS): $(2) $(SCMI_DTB)
+ @echo " CP $$<"
+ ${Q}cp $$< $$@
+endef
+
+$(eval files := $(SCMI_DTS))
+$(eval $(foreach file,$(files),$(call FDTS_CP,$(BUILD_PLAT),$(file))))
+scmi: $(FDTS)
+
diff --git a/spm/scmi/cactus_interrupt.c b/spm/scmi/cactus_interrupt.c
new file mode 100644
index 0000000..c9df466
--- /dev/null
+++ b/spm/scmi/cactus_interrupt.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+#include <mmio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <ffa_svc.h>
+
+#include "cactus_test_cmds.h"
+#include <drivers/arm/sp805.h>
+#include <ffa_helpers.h>
+#include "spm_common.h"
+#include "sp_helpers.h"
+#include "spm_helpers.h"
+
+#include <platform_def.h>
+
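+/*
+ * Virtual interrupt ID used by the SPMC to signal the Notification Pending
+ * Interrupt (NPI) to this partition.
+ */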
+#define NOTIFICATION_PENDING_INTERRUPT_INTID 5
+
+extern ffa_id_t g_ffa_id;
+extern ffa_id_t g_dir_req_source_id;
+static uint32_t managed_exit_interrupt_id;
+
+spinlock_t sp_handler_lock[NUM_VINT_ID];
+
+void (*sp_interrupt_handler[NUM_VINT_ID])(void);
+
+void sp_handler_spin_lock_init(void)
+{
+ for (uint32_t i = 0; i < NUM_VINT_ID; i++) {
+ init_spinlock(&sp_handler_lock[i]);
+ }
+}
+
+void sp_register_interrupt_handler(void (*handler)(void),
+ uint32_t interrupt_id)
+{
+ if (interrupt_id >= NUM_VINT_ID) {
+ ERROR("Cannot register handler for interrupt %u\n", interrupt_id);
+ panic();
+ }
+
+ spin_lock(&sp_handler_lock[interrupt_id]);
+ sp_interrupt_handler[interrupt_id] = handler;
+ spin_unlock(&sp_handler_lock[interrupt_id]);
+}
+
+void sp_unregister_interrupt_handler(uint32_t interrupt_id)
+{
+ if (interrupt_id >= NUM_VINT_ID) {
+ ERROR("Cannot unregister handler for interrupt %u\n", interrupt_id);
+ panic();
+ }
+
+ spin_lock(&sp_handler_lock[interrupt_id]);
+ sp_interrupt_handler[interrupt_id] = NULL;
+ spin_unlock(&sp_handler_lock[interrupt_id]);
+}
+
+/*******************************************************************************
+ * Hypervisor Calls Wrappers
+ ******************************************************************************/
+
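+/**
+ * Hypervisor call to retrieve the ID of the pending virtual interrupt.
+ */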
+uint32_t spm_interrupt_get(void)
+{
+ hvc_args args = {
+ .fid = SPM_INTERRUPT_GET
+ };
+
+ hvc_ret_values ret = tftf_hvc(&args);
+
+ return ret.ret0;
+}
+
+/**
+ * Hypervisor call to enable/disable delivery to the SP of a virtual interrupt
+ * with the given int_id value, through the IRQ or FIQ vector (pin).
+ * Returns 0 on success, or -1 if an invalid interrupt ID is passed.
+ */
+int64_t spm_interrupt_enable(uint32_t int_id, bool enable, enum interrupt_pin pin)
+{
+ hvc_args args = {
+ .fid = SPM_INTERRUPT_ENABLE,
+ .arg1 = int_id,
+ .arg2 = enable,
+ .arg3 = pin
+ };
+
+ hvc_ret_values ret = tftf_hvc(&args);
+
+ return (int64_t)ret.ret0;
+}
+
+/**
+ * Hypervisor call to drop the priority and deactivate a secure interrupt.
+ * Returns 0 on success, or -1 if an invalid interrupt ID is passed.
+ */
+int64_t spm_interrupt_deactivate(uint32_t vint_id)
+{
+ hvc_args args = {
+ .fid = SPM_INTERRUPT_DEACTIVATE,
+ .arg1 = vint_id, /* pint_id */
+ .arg2 = vint_id
+ };
+
+ hvc_ret_values ret = tftf_hvc(&args);
+
+ return (int64_t)ret.ret0;
+}
+
+/*
+ * The managed exit interrupt ID is discoverable by querying the SPMC
+ * through the FFA_FEATURES interface.
+ */
+void discover_managed_exit_interrupt_id(void)
+{
+ struct ffa_value ffa_ret;
+
+ /* Interrupt ID value is returned through register W2. */
+ ffa_ret = ffa_features(FFA_FEATURE_MEI);
+ managed_exit_interrupt_id = ffa_feature_intid(ffa_ret);
+
+	VERBOSE("Discovered managed exit interrupt ID: %u\n",
+ managed_exit_interrupt_id);
+}
+
+/*
+ * The Cactus SP does not implement application threads. Hence, once it has
+ * sent the managed exit response to the direct request originator, execution
+ * remains frozen in interrupt handler context.
+ * Although the SP moves to the WAITING state, it cannot accept a new direct
+ * request message from any endpoint. It can only receive a direct request
+ * message carrying the CACTUS_RESUME_AFTER_MANAGED_EXIT command from the
+ * originator of the suspended direct request, in order to return from the
+ * interrupt handler context and resume processing the suspended request.
+ */
+void send_managed_exit_response(void)
+{
+ struct ffa_value ffa_ret;
+ bool waiting_resume_after_managed_exit;
+
+ /*
+ * A secure partition performs its housekeeping and sends a direct
+ * response to signal interrupt completion. This is a pure virtual
+ * interrupt, no need for deactivation.
+ */
+ ffa_ret = cactus_response(g_ffa_id, g_dir_req_source_id,
+ MANAGED_EXIT_INTERRUPT_ID);
+ waiting_resume_after_managed_exit = true;
+
+ while (waiting_resume_after_managed_exit) {
+
+ waiting_resume_after_managed_exit =
+ (ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC32 &&
+ ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64) ||
+ ffa_dir_msg_source(ffa_ret) != g_dir_req_source_id;
+
+ if (waiting_resume_after_managed_exit) {
+ VERBOSE("Expected a direct message request from endpoint"
+ " %x to resume the command\n",
+ g_dir_req_source_id);
+ ffa_ret = cactus_error_resp(g_ffa_id,
+ ffa_dir_msg_source(ffa_ret),
+ CACTUS_ERROR_TEST);
+ }
+ }
+ VERBOSE("Resuming the suspended command\n");
+}
+
+void notification_pending_interrupt_handler(void)
+{
+	/* Find out which core this handler is running on. */
+ unsigned int core_pos = platform_get_core_pos(
+ read_mpidr_el1() & MPID_MASK);
+
+ VERBOSE("NPI handled in core %u\n", core_pos);
+}
+
+void register_maintenance_interrupt_handlers(void)
+{
+ sp_register_interrupt_handler(send_managed_exit_response,
+ managed_exit_interrupt_id);
+ sp_register_interrupt_handler(notification_pending_interrupt_handler,
+ NOTIFICATION_PENDING_INTERRUPT_INTID);
+}
+
+void cactus_interrupt_handler_irq(void)
+{
+ uint32_t intid = spm_interrupt_get();
+
+ /* Invoke the handler registered by the SP. */
+ spin_lock(&sp_handler_lock[intid]);
+ if (sp_interrupt_handler[intid]) {
+ sp_interrupt_handler[intid]();
+ } else {
+ ERROR("%s: Interrupt ID %x not handled!\n", __func__, intid);
+ panic();
+ }
+ spin_unlock(&sp_handler_lock[intid]);
+}
+
+void cactus_interrupt_handler_fiq(void)
+{
+ uint32_t intid = spm_interrupt_get();
+
+ if (intid == MANAGED_EXIT_INTERRUPT_ID) {
+ /*
+ * A secure partition performs its housekeeping and sends a
+ * direct response to signal interrupt completion.
+ * This is a pure virtual interrupt, no need for deactivation.
+ */
+ VERBOSE("vFIQ: Sending ME response to %x\n",
+ g_dir_req_source_id);
+ send_managed_exit_response();
+ } else {
+ /*
+ * Currently only managed exit interrupt is supported by vFIQ.
+ */
+ panic();
+ }
+}
diff --git a/spm/scmi/cactus_main.c b/spm/scmi/cactus_main.c
new file mode 100644
index 0000000..fd23e31
--- /dev/null
+++ b/spm/scmi/cactus_main.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <debug.h>
+
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+#include <lib/aarch64/arch_helpers.h>
+#include <lib/tftf_lib.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <ffa_helpers.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <sp_debug.h>
+#include <sp_helpers.h>
+#include <spm_helpers.h>
+#include <std_svc.h>
+
+#include "sp_def.h"
+#include "cactus.h"
+
+#include <cactus_test_cmds.h>
+#include <events.h>
+#include <platform.h>
+
+/* SCMI server entry point */
+#include <arch_main.h>
+
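+/*
+ * Commands of the SCMI server's FF-A interface. In the descriptions below,
+ * [out] denotes a value sent by the caller to the server and [in] a value
+ * returned by the server to the caller.
+ */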
+enum scmi_ffa_pta_cmd {
+ /*
+ * FFA_SCMI_CMD_CAPABILITIES - Get channel capabilities
+ *
+ * [out] data0: Cmd FFA_SCMI_CMD_CAPABILITIES
+ * [in] data1: Capability bit mask
+ */
+ FFA_SCMI_CMD_CAPABILITIES = 0,
+
+ /*
+ * FFA_SCMI_CMD_GET_CHANNEL - Get channel handle
+ *
+ * [out] data0: Cmd FFA_SCMI_CMD_GET_CHANNEL
+ * [out] data1: Channel identifier
+ * [in] data1: Returned channel handle
+ * [out] data2: Shared memory handle (optional)
+ */
+ FFA_SCMI_CMD_GET_CHANNEL = 1,
+
+ /*
+ * FFA_SCMI_CMD_MSG_SEND_DIRECT_REQ - Process direct SCMI message
+ * with shared memory
+ *
+ * [out] data0: Cmd FFA_SCMI_CMD_MSG_SEND_DIRECT_REQ
+ * [out] data1: Channel handle
+ * [in/out] data2: Response size
+ *
+ */
+ FFA_SCMI_CMD_MSG_SEND_DIRECT_REQ = 2,
+
+ /*
+	 * FFA_SCMI_CMD_MSG_SEND2 - Process SCMI message in RXTX buffer
+ *
+ * Use FFA RX/TX message to exchange request with the SCMI server.
+ */
+ FFA_SCMI_CMD_MSG_SEND2 = 3,
+};
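+
+/*
+ * Sketch of an exchange, as handled by cactus_handle_cmd() below: the caller
+ * sends FFA_MSG_SEND_DIRECT_REQ with the command in x3 and its parameters in
+ * x4-x7 (e.g. channel identifier and memory handle for
+ * FFA_SCMI_CMD_GET_CHANNEL); the direct response carries a success/error
+ * status in the first payload register, followed by command-specific values
+ * such as the channel handle.
+ */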
+
+#define FFA_SCMI_CAPS_SHARED_BUFFER (0x1 << 1)
+
+extern void secondary_cold_entry(void);
+
+/* Global ffa_id */
+ffa_id_t g_ffa_id;
+
+/* Global FFA_MSG_DIRECT_REQ source ID */
+ffa_id_t g_dir_req_source_id;
+
+#define PRINT_CMD(smc_ret) \
+ VERBOSE("cmd %lx; args: %lx, %lx, %lx, %lx\n", \
+ smc_ret.arg3, smc_ret.arg4, smc_ret.arg5, \
+ smc_ret.arg6, smc_ret.arg7)
+
+void __attribute__((__noreturn__)) do_panic(const char *file, int line)
+{
+ printf("PANIC in file: %s line: %d\n", file, line);
+
+ console_flush();
+
+ while (1)
+ continue;
+}
+
+void __attribute__((__noreturn__)) do_bug_unreachable(const char *file, int line)
+{
+ mp_printf("BUG: Unreachable code!\n");
+ do_panic(file, line);
+}
+
+/*
+ * Retrieve shared memory.
+ */
+static void *scmi_memory_retrieve(ffa_id_t source, ffa_id_t vm_id,
+				  uint64_t handle, struct mailbox_buffers *mb)
+{
+ struct ffa_memory_region *m;
+ struct ffa_composite_memory_region *composite;
+ int ret;
+ unsigned int mem_attrs;
+ void *ptr;
+ ffa_memory_region_flags_t retrv_flags = 0;
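+	/* The shared buffer is expected to live in non-secure memory. */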
+ bool non_secure = true;
+
+	if (!memory_retrieve(mb, &m, handle, source, vm_id, retrv_flags)) {
+		ERROR("Failed to retrieve memory region!\n");
+ return 0;
+ }
+
+ composite = ffa_memory_region_get_composite(m, 0);
+
+	/* The shared SCMI buffer is expected to be mapped with RW permissions. */
+ if (ffa_get_data_access_attr(
+ m->receivers[0].receiver_permissions.permissions) !=
+ FFA_DATA_ACCESS_RW) {
+ ERROR("Permissions not expected!\n");
+ return 0;
+ }
+
+ mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
+
+ if (non_secure) {
+ mem_attrs |= MT_NS;
+ }
+
+ ret = mmap_add_dynamic_region(
+ (uint64_t)composite->constituents[0].address,
+ (uint64_t)composite->constituents[0].address,
+ composite->constituents[0].page_count * PAGE_SIZE,
+ mem_attrs);
+
+ if (ret != 0) {
+ ERROR("Failed to map received memory region(%d)!\n", ret);
+ return 0;
+ }
+
+ ptr = (void *) composite->constituents[0].address;
+
+ if (ffa_func_id(ffa_rx_release()) != FFA_SUCCESS_SMC32) {
+ ERROR("Failed to release buffer!\n");
+ return 0;
+ }
+
+ return ptr;
+}
+
+/**
+ * Decodes the command carried in a direct request message and invokes the
+ * matching SCMI handler, filling 'ret' with the direct response.
+ */
+bool cactus_handle_cmd(struct ffa_value *cmd_args, struct ffa_value *ret,
+ struct mailbox_buffers *mb)
+{
+ uint64_t in_cmd;
+
+ if (cmd_args == NULL || ret == NULL) {
+ ERROR("Invalid arguments passed to %s!\n", __func__);
+ return false;
+ }
+
+ /* Get the source of the Direct Request message. */
+ if (ffa_func_id(*cmd_args) == FFA_MSG_SEND_DIRECT_REQ_SMC32 ||
+ ffa_func_id(*cmd_args) == FFA_MSG_SEND_DIRECT_REQ_SMC64) {
+ g_dir_req_source_id = ffa_dir_msg_source(*cmd_args);
+ }
+
+ PRINT_CMD((*cmd_args));
+
+ in_cmd = cactus_get_cmd(*cmd_args);
+
+ switch (in_cmd) {
+ case FFA_SCMI_CMD_CAPABILITIES:
+ *ret = cactus_send_response32(ffa_dir_msg_dest(*cmd_args),
+ ffa_dir_msg_source(*cmd_args),
+					      0, /* success */
+					      FFA_SCMI_CAPS_SHARED_BUFFER, /* direct req mode */
+ 0, 0, 0);
+ break;
+
+ case FFA_SCMI_CMD_GET_CHANNEL:
+ {
+ ffa_id_t source = ffa_dir_msg_source(*cmd_args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*cmd_args);
+ uint64_t channel = cmd_args->arg4;
+ uint64_t handle = cmd_args->arg5;
+ void *buffer;
+
+ NOTICE("scmi_get_channel VM id: %x chnl: %llx mem hdl: %llx\n",
+ vm_id, channel, handle);
+
+ buffer = scmi_memory_retrieve(source, vm_id, handle, mb);
+ NOTICE("scmi_get_channel VM id: %x ptr: %p\n",
+ vm_id, buffer);
+
+ channel = scmi_get_device(channel, vm_id, buffer);
+
+		/* If channel == -1, the shared memory should be released. */
+
+ *ret = cactus_send_response32(ffa_dir_msg_dest(*cmd_args),
+ ffa_dir_msg_source(*cmd_args),
+					      0, /* success */
+ channel,
+ 0, 0, 0);
+ }
+ break;
+
+ case FFA_SCMI_CMD_MSG_SEND_DIRECT_REQ:
+ {
+ uint32_t channel = (uint32_t)cmd_args->arg4;
+ size_t msg_size = cmd_args->arg5;
+ ffa_id_t vm_id = ffa_dir_msg_dest(*cmd_args);
+
+ NOTICE("scmi_process_msg VM id: %x chnl: %x size: %lu\n",
+ vm_id, channel, msg_size);
+
+ scmi_process_mbx_msg(channel, vm_id, &msg_size);
+
+ *ret = cactus_send_response32(ffa_dir_msg_dest(*cmd_args),
+ ffa_dir_msg_source(*cmd_args),
+					      0, /* success */
+ channel,
+ msg_size,
+ 0, 0);
+ }
+ break;
+
+ default:
+ *ret = cactus_error_resp(ffa_dir_msg_dest(*cmd_args),
+ ffa_dir_msg_source(*cmd_args),
+ CACTUS_ERROR_UNHANDLED);
+ }
+
+ return true;
+}
+
+/*
+ * Message loop function.
+ * Note that we cannot use the regular print functions here because this loop
+ * serves both "primary" and "secondary" VMs. A secondary VM cannot access the
+ * UART directly; it prints through the Hafnium print hypercall instead.
+ */
+
+static void __dead2 message_loop(ffa_id_t vm_id, struct mailbox_buffers *mb)
+{
+ struct ffa_value ffa_ret;
+ ffa_id_t destination;
+
+ /*
+ * This initial wait call is necessary to inform SPMD that
+ * SP initialization has completed. It blocks until receiving
+ * a direct message request.
+ */
+
+ ffa_ret = ffa_msg_wait();
+
+ for (;;) {
+ VERBOSE("Woke up with func id: %x\n", ffa_func_id(ffa_ret));
+
+ if (ffa_func_id(ffa_ret) == FFA_ERROR) {
+ ERROR("Error: %x\n", ffa_error_code(ffa_ret));
+ break;
+ }
+
+ if (ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC32 &&
+ ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64 &&
+ ffa_func_id(ffa_ret) != FFA_INTERRUPT &&
+ ffa_func_id(ffa_ret) != FFA_RUN) {
+ ERROR("%s(%u) unknown func id 0x%x\n",
+ __func__, vm_id, ffa_func_id(ffa_ret));
+ break;
+ }
+
+ if ((ffa_func_id(ffa_ret) == FFA_INTERRUPT) ||
+ (ffa_func_id(ffa_ret) == FFA_RUN)) {
+ /*
+ * Received FFA_INTERRUPT in waiting state.
+ * The interrupt id is passed although this is just
+ * informational as we're running with virtual
+ * interrupts unmasked and the interrupt is processed
+ * by the interrupt handler.
+ *
+ * Received FFA_RUN in waiting state, the endpoint
+ * simply returns by FFA_MSG_WAIT.
+ */
+ ffa_ret = ffa_msg_wait();
+ continue;
+ }
+
+ destination = ffa_dir_msg_dest(ffa_ret);
+ if (destination != vm_id) {
+ ERROR("%s(%u) invalid vm id 0x%x\n",
+ __func__, vm_id, destination);
+ break;
+ }
+
+ if (!cactus_handle_cmd(&ffa_ret, &ffa_ret, mb)) {
+ break;
+ }
+ }
+
+ panic();
+}
+
+static const mmap_region_t cactus_mmap[] __attribute__((used)) = {
+	/* The PLAT_ARM_DEVICE0 area includes UART2, needed for the console. */
+ MAP_REGION_FLAT(PLAT_ARM_DEVICE0_BASE, PLAT_ARM_DEVICE0_SIZE,
+ MT_DEVICE | MT_RW),
+ {0}
+};
+
+static void cactus_print_memory_layout(unsigned int vm_id)
+{
+ INFO("Secure Partition memory layout:\n");
+
+ INFO(" Text region : %p - %p\n",
+ (void *)CACTUS_TEXT_START, (void *)CACTUS_TEXT_END);
+
+ INFO(" Read-only data region : %p - %p\n",
+ (void *)CACTUS_RODATA_START, (void *)CACTUS_RODATA_END);
+
+ INFO(" Data region : %p - %p\n",
+ (void *)CACTUS_DATA_START, (void *)CACTUS_DATA_END);
+
+ INFO(" BSS region : %p - %p\n",
+ (void *)CACTUS_BSS_START, (void *)CACTUS_BSS_END);
+
+ INFO(" RX : %p - %p\n",
+ (void *)get_sp_rx_start(vm_id),
+ (void *)get_sp_rx_end(vm_id));
+
+ INFO(" TX : %p - %p\n",
+ (void *)get_sp_tx_start(vm_id),
+ (void *)get_sp_tx_end(vm_id));
+}
+
+static void cactus_plat_configure_mmu(unsigned int vm_id)
+{
+ mmap_add_region(CACTUS_TEXT_START,
+ CACTUS_TEXT_START,
+ CACTUS_TEXT_END - CACTUS_TEXT_START,
+ MT_CODE);
+ mmap_add_region(CACTUS_RODATA_START,
+ CACTUS_RODATA_START,
+ CACTUS_RODATA_END - CACTUS_RODATA_START,
+ MT_RO_DATA);
+ mmap_add_region(CACTUS_DATA_START,
+ CACTUS_DATA_START,
+ CACTUS_DATA_END - CACTUS_DATA_START,
+ MT_RW_DATA);
+ mmap_add_region(CACTUS_BSS_START,
+ CACTUS_BSS_START,
+ CACTUS_BSS_END - CACTUS_BSS_START,
+ MT_RW_DATA);
+
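+	/* Map the RX buffer read-only and the TX buffer read-write. */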
+ mmap_add_region(get_sp_rx_start(vm_id),
+ get_sp_rx_start(vm_id),
+ (SP_RX_TX_SIZE / 2),
+ MT_RO_DATA);
+
+ mmap_add_region(get_sp_tx_start(vm_id),
+ get_sp_tx_start(vm_id),
+ (SP_RX_TX_SIZE / 2),
+ MT_RW_DATA);
+
+ mmap_add(cactus_mmap);
+ init_xlat_tables();
+}
+
+static void register_secondary_entrypoint(void)
+{
+ smc_args args;
+
+ args.fid = FFA_SECONDARY_EP_REGISTER_SMC64;
+ args.arg1 = (u_register_t)&secondary_cold_entry;
+
+ tftf_smc(&args);
+}
+
+void __dead2 cactus_main(bool primary_cold_boot,
+ struct ffa_boot_info_header *boot_info_header)
+{
+ assert(IS_IN_EL1() != 0);
+
+ struct mailbox_buffers mb;
+ struct ffa_value ret;
+
+ /* Get current FFA id */
+ struct ffa_value ffa_id_ret = ffa_id_get();
+ ffa_id_t ffa_id = ffa_endpoint_id(ffa_id_ret);
+ if (ffa_func_id(ffa_id_ret) != FFA_SUCCESS_SMC32) {
+ ERROR("FFA_ID_GET failed.\n");
+ panic();
+ }
+
+ if (primary_cold_boot == true) {
+ /* Clear BSS */
+ memset((void *)CACTUS_BSS_START,
+ 0, CACTUS_BSS_END - CACTUS_BSS_START);
+
+ /* Configure and enable Stage-1 MMU, enable D-Cache */
+ cactus_plat_configure_mmu(ffa_id);
+
+ /* Initialize locks for tail end interrupt handler */
+ sp_handler_spin_lock_init();
+
+ if (boot_info_header != NULL) {
+ /*
+ * TODO: Currently just validating that cactus can
+ * access the boot info descriptors. In case we want to
+ * use the boot info contents, we should check the
+ * blob and remap if the size is bigger than one page.
+ * Only then access the contents.
+ */
+ mmap_add_dynamic_region(
+ (unsigned long long)boot_info_header,
+ (uintptr_t)boot_info_header,
+ PAGE_SIZE, MT_RO_DATA);
+ }
+ }
+ mb.send = (void *) get_sp_tx_start(ffa_id);
+ mb.recv = (void *) get_sp_rx_start(ffa_id);
+
+ /*
+ * The local ffa_id value is held on the stack. The global g_ffa_id
+ * value is set after BSS is cleared.
+ */
+ g_ffa_id = ffa_id;
+
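+	/* Enable the stage-1 MMU; tables were set up during primary cold boot. */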
+ enable_mmu_el1(0);
+
+	/* Enable IRQ delivery (FIQ delivery is left disabled). */
+	enable_irq();
+	/* enable_fiq(); */
+
+ if (primary_cold_boot == false) {
+ goto msg_loop;
+ }
+
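+	/* This standalone partition always prints directly through the PL011. */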
+ if (true) {
+ console_init(CACTUS_PL011_UART_BASE,
+ CACTUS_PL011_UART_CLK_IN_HZ,
+ PL011_BAUDRATE);
+
+ set_putc_impl(PL011_AS_STDOUT);
+
+ } else {
+ set_putc_impl(FFA_SVC_SMC_CALL_AS_STDOUT);
+ }
+
+	NOTICE("Booting Secure Partition (ID: %x)\n", ffa_id);
+
+ CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+		ERROR("Failed to map RXTX buffers. Error: %x\n",
+		      ffa_error_code(ret));
+ panic();
+ }
+
+ cactus_print_memory_layout(ffa_id);
+
+ register_secondary_entrypoint();
+ discover_managed_exit_interrupt_id();
+ register_maintenance_interrupt_handlers();
+
+ /* Invoking SCMI server */
+ VERBOSE("SCMI server init start\n");
+ scmi_arch_init();
+ VERBOSE("SCMI server init end\n");
+
+msg_loop:
+	/* Enter the message loop. */
+ message_loop(ffa_id, &mb);
+
+ /* Not reached */
+}
diff --git a/spm/scmi/ffa_helpers.c b/spm/scmi/ffa_helpers.c
new file mode 100644
index 0000000..9e7148b
--- /dev/null
+++ b/spm/scmi/ffa_helpers.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <smccc.h>
+
+struct ffa_value ffa_service_call(struct ffa_value *args)
+{
+ ffa_smc(args);
+ return *args;
+}
+
+/*-----------------------------------------------------------------------------
+ * FFA_RUN
+ *
+ * Parameters
+ * uint32 Function ID (w0): 0x8400006D
+ * uint32 Target information (w1): Information to identify target SP/VM
+ * -Bits[31:16]: ID of SP/VM.
+ * -Bits[15:0]: ID of vCPU of SP/VM to run.
+ * Other Parameter registers w2-w7/x2-x7: Reserved (MBZ)
+ *
+ * On failure, returns FFA_ERROR in w0 and error code in w2:
+ * -INVALID_PARAMETERS: Unrecognized endpoint or vCPU ID
+ * -NOT_SUPPORTED: This function is not implemented at this FFA instance
+ * -DENIED: Callee is not in a state to handle this request
+ * -BUSY: vCPU is busy and caller must retry later
+ * -ABORTED: vCPU or VM ran into an unexpected error and has aborted
+ */
+struct ffa_value ffa_run(uint32_t dest_id, uint32_t vcpu_id)
+{
+ struct ffa_value args = {
+ FFA_RUN,
+ (dest_id << 16) | vcpu_id,
+ 0, 0, 0, 0, 0, 0
+ };
+
+ return ffa_service_call(&args);
+}
+
+/*-----------------------------------------------------------------------------
+ * FFA_MSG_SEND_DIRECT_REQ
+ *
+ * Parameters
+ * uint32 Function ID (w0): 0x8400006F / 0xC400006F
+ * uint32 Source/Destination IDs (w1): Source and destination endpoint IDs
+ * -Bit[31:16]: Source endpoint ID
+ * -Bit[15:0]: Destination endpoint ID
+ * uint32/uint64 (w2/x2) - RFU MBZ
+ * w3-w7 - Implementation defined
+ *
+ * On failure, returns FFA_ERROR in w0 and error code in w2:
+ * -INVALID_PARAMETERS: Invalid endpoint ID or non-zero reserved register
+ * -DENIED: Callee is not in a state to handle this request
+ * -NOT_SUPPORTED: This function is not implemented at this FFA instance
+ * -BUSY: Message target is busy
+ * -ABORTED: Message target ran into an unexpected error and has aborted
+ */
+struct ffa_value ffa_msg_send_direct_req64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
+{
+ struct ffa_value args = {
+ .fid = FFA_MSG_SEND_DIRECT_REQ_SMC64,
+ .arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
+ .arg2 = 0,
+ .arg3 = arg0,
+ .arg4 = arg1,
+ .arg5 = arg2,
+ .arg6 = arg3,
+ .arg7 = arg4,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_msg_send_direct_req32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
+{
+ struct ffa_value args = {
+ .fid = FFA_MSG_SEND_DIRECT_REQ_SMC32,
+ .arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
+ .arg2 = 0,
+ .arg3 = arg0,
+ .arg4 = arg1,
+ .arg5 = arg2,
+ .arg6 = arg3,
+ .arg7 = arg4,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_msg_send_direct_resp64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
+{
+ struct ffa_value args = {
+ .fid = FFA_MSG_SEND_DIRECT_RESP_SMC64,
+ .arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
+ .arg2 = 0,
+ .arg3 = arg0,
+ .arg4 = arg1,
+ .arg5 = arg2,
+ .arg6 = arg3,
+ .arg7 = arg4,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_msg_send_direct_resp32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
+{
+ struct ffa_value args = {
+ .fid = FFA_MSG_SEND_DIRECT_RESP_SMC32,
+ .arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
+ .arg2 = 0,
+ .arg3 = arg0,
+ .arg4 = arg1,
+ .arg5 = arg2,
+ .arg6 = arg3,
+ .arg7 = arg4,
+ };
+
+ return ffa_service_call(&args);
+}
+
+
+/**
+ * Initialises the header of the given `ffa_memory_region`, not including the
+ * composite memory region offset.
+ */
+static void ffa_memory_region_init_header(
+ struct ffa_memory_region *memory_region, ffa_id_t sender,
+ ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
+ ffa_memory_handle_t handle, uint32_t tag, ffa_id_t receiver,
+ ffa_memory_access_permissions_t permissions)
+{
+ memory_region->sender = sender;
+ memory_region->attributes = attributes;
+ memory_region->flags = flags;
+ memory_region->handle = handle;
+ memory_region->tag = tag;
+ memory_region->memory_access_desc_size =
+ sizeof(struct ffa_memory_access);
+ memory_region->receiver_count = 1;
+ memory_region->receivers[0].receiver_permissions.receiver = receiver;
+ memory_region->receivers[0].receiver_permissions.permissions =
+ permissions;
+ memory_region->receivers[0].receiver_permissions.flags = 0;
+ memory_region->receivers[0].reserved_0 = 0;
+ /* Receivers at the end of the `ffa_memory_region` structure. */
+ memory_region->receivers_offset = sizeof(struct ffa_memory_region);
+ memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
+}
+
+/**
+ * Initialises the given `ffa_memory_region` and copies as many as possible of
+ * the given constituents to it.
+ *
+ * Returns the number of constituents remaining which wouldn't fit, and (via
+ * return parameters) the size in bytes of the first fragment of data copied to
+ * `memory_region` (attributes, constituents and memory region header size), and
+ * the total size of the memory sharing message including all constituents.
+ */
+uint32_t ffa_memory_region_init(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, ffa_id_t receiver,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t *fragment_length)
+{
+ ffa_memory_access_permissions_t permissions = 0;
+ ffa_memory_attributes_t attributes = 0;
+ struct ffa_composite_memory_region *composite_memory_region;
+ uint32_t fragment_max_constituents;
+ uint32_t count_to_copy;
+ uint32_t i;
+ uint32_t constituents_offset;
+
+ /* Set memory region's permissions. */
+ ffa_set_data_access_attr(&permissions, data_access);
+ ffa_set_instruction_access_attr(&permissions, instruction_access);
+
+ /* Set memory region's page attributes. */
+ ffa_set_memory_type_attr(&attributes, type);
+ ffa_set_memory_cacheability_attr(&attributes, cacheability);
+ ffa_set_memory_shareability_attr(&attributes, shareability);
+
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ 0, tag, receiver, permissions);
+	/*
+	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
+	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
+	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
+	 * calculate here is aligned to a 64-bit boundary and that 64-bit
+	 * values can be copied without alignment faults.
+	 */
+ memory_region->receivers[0].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct ffa_memory_access);
+
+ composite_memory_region =
+ ffa_memory_region_get_composite(memory_region, 0);
+ composite_memory_region->page_count = 0;
+ composite_memory_region->constituent_count = constituent_count;
+ composite_memory_region->reserved_0 = 0;
+
+ constituents_offset =
+ memory_region->receivers[0].composite_memory_region_offset +
+ sizeof(struct ffa_composite_memory_region);
+ fragment_max_constituents =
+ (memory_region_max_size - constituents_offset) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ count_to_copy = constituent_count;
+ if (count_to_copy > fragment_max_constituents) {
+ count_to_copy = fragment_max_constituents;
+ }
+
+ for (i = 0; i < constituent_count; ++i) {
+ if (i < count_to_copy) {
+ composite_memory_region->constituents[i] =
+ constituents[i];
+ }
+ composite_memory_region->page_count +=
+ constituents[i].page_count;
+ }
+
+ if (total_length != NULL) {
+ *total_length =
+ constituents_offset +
+ composite_memory_region->constituent_count *
+ sizeof(struct ffa_memory_region_constituent);
+ }
+ if (fragment_length != NULL) {
+ *fragment_length =
+ constituents_offset +
+ count_to_copy *
+ sizeof(struct ffa_memory_region_constituent);
+ }
+
+ return composite_memory_region->constituent_count - count_to_copy;
+}
+
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ *
+ * Returns the size of the message written.
+ */
+uint32_t ffa_memory_retrieve_request_init(
+ struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
+ ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability)
+{
+ ffa_memory_access_permissions_t permissions = 0;
+ ffa_memory_attributes_t attributes = 0;
+
+ /* Set memory region's permissions. */
+ ffa_set_data_access_attr(&permissions, data_access);
+ ffa_set_instruction_access_attr(&permissions, instruction_access);
+
+ /* Set memory region's page attributes. */
+ ffa_set_memory_type_attr(&attributes, type);
+ ffa_set_memory_cacheability_attr(&attributes, cacheability);
+ ffa_set_memory_shareability_attr(&attributes, shareability);
+
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ handle, tag, receiver, permissions);
+ /*
+ * Offset 0 in this case means that the hypervisor should allocate the
+ * address ranges. This is the only configuration supported by Hafnium,
+ * as it enforces 1:1 mappings in the stage 2 page tables.
+ */
+ memory_region->receivers[0].composite_memory_region_offset = 0;
+ memory_region->receivers[0].reserved_0 = 0;
+
+ return sizeof(struct ffa_memory_region) +
+ memory_region->receiver_count * sizeof(struct ffa_memory_access);
+}
+
+/*
+ * FFA Version ABI helper.
+ * Version fields:
+ * -Bits[30:16]: Major version.
+ * -Bits[15:0]: Minor version.
+ */
+struct ffa_value ffa_version(uint32_t input_version)
+{
+ struct ffa_value args = {
+ .fid = FFA_VERSION,
+ .arg1 = input_version
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_id_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_ID_GET
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_spm_id_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_SPM_ID_GET
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_msg_wait(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_MSG_WAIT
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_error(int32_t error_code)
+{
+ struct ffa_value args = {
+ .fid = FFA_ERROR,
+ .arg1 = 0,
+ .arg2 = error_code
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Query the higher EL whether the requested FF-A feature is implemented. */
+struct ffa_value ffa_features(uint32_t feature)
+{
+ struct ffa_value args = {
+ .fid = FFA_FEATURES,
+ .arg1 = feature
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Query the higher EL whether the requested FF-A feature is implemented. */
+struct ffa_value ffa_features_with_input_property(uint32_t feature, uint32_t param)
+{
+ struct ffa_value args = {
+ .fid = FFA_FEATURES,
+ .arg1 = feature,
+ .arg2 = param,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Get information about VMs or SPs based on UUID, using registers. */
+struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
+ const uint16_t start_index,
+ const uint16_t tag)
+{
+ uint64_t arg1 = (uint64_t)uuid.uuid[1] << 32 | uuid.uuid[0];
+ uint64_t arg2 = (uint64_t)uuid.uuid[3] << 32 | uuid.uuid[2];
+ uint64_t arg3 = start_index | (uint64_t)tag << 16;
+
+ struct ffa_value args = {
+ .fid = FFA_PARTITION_INFO_GET_REGS_SMC64,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ .arg3 = arg3,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Get information about VMs or SPs based on UUID */
+struct ffa_value ffa_partition_info_get(const struct ffa_uuid uuid)
+{
+ struct ffa_value args = {
+ .fid = FFA_PARTITION_INFO_GET,
+ .arg1 = uuid.uuid[0],
+ .arg2 = uuid.uuid[1],
+ .arg3 = uuid.uuid[2],
+ .arg4 = uuid.uuid[3]
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Signal that the partition's RX buffer can be released */
+struct ffa_value ffa_rx_release(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_RX_RELEASE
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Map the RXTX buffer */
+struct ffa_value ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
+{
+ struct ffa_value args = {
+ .fid = FFA_RXTX_MAP_SMC64,
+ .arg1 = send,
+ .arg2 = recv,
+ .arg3 = pages,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Unmap the RXTX buffer allocated by the given FF-A component */
+struct ffa_value ffa_rxtx_unmap(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_RXTX_UNMAP,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Donate memory to another partition */
+struct ffa_value ffa_mem_donate(uint32_t descriptor_length,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_DONATE_SMC32,
+ .arg1 = descriptor_length,
+ .arg2 = fragment_length,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Lend memory to another partition */
+struct ffa_value ffa_mem_lend(uint32_t descriptor_length,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_LEND_SMC32,
+ .arg1 = descriptor_length,
+ .arg2 = fragment_length,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Share memory with another partition */
+struct ffa_value ffa_mem_share(uint32_t descriptor_length,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_SHARE_SMC32,
+ .arg1 = descriptor_length,
+ .arg2 = fragment_length,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Retrieve memory shared by another partition */
+struct ffa_value ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_RETRIEVE_REQ_SMC32,
+ .arg1 = descriptor_length,
+ .arg2 = fragment_length,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Relinquish access to memory region */
+struct ffa_value ffa_mem_relinquish(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_RELINQUISH,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Reclaim exclusive access to owned memory region */
+struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_RECLAIM,
+ .arg1 = (uint32_t) handle,
+ .arg2 = (uint32_t) (handle >> 32),
+ .arg3 = flags
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Create Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_CREATE,
+ .arg1 = vm_id,
+ .arg2 = vcpu_count,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Destroy Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_DESTROY,
+ .arg1 = vm_id,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Bind VM to all the notifications in the bitmap */
+struct ffa_value ffa_notification_bind(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Unbind previously bound VM from notifications in bitmap */
+struct ffa_value ffa_notification_unbind(ffa_id_t sender,
+ ffa_id_t receiver,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_UNBIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = (uint32_t)(bitmap),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_set(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_SET,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_get(ffa_id_t receiver, uint32_t vcpu_id,
+ uint32_t flags)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_GET,
+ .arg1 = (vcpu_id << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_info_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_INFO_GET_SMC64,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
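+/*
+ * Copy up to sizeof(uint64_t) bytes of 'message' into the argument register
+ * 'arg'. Returns the number of bytes copied.
+ */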
+static size_t char_to_arg_helper(const char *message, size_t size,
+ u_register_t *arg)
+{
+ size_t to_write = size > sizeof(uint64_t) ? sizeof(uint64_t) : size;
+
+ for (unsigned int i = 0; i < to_write; i++) {
+ ((char *)arg)[i] = message[i];
+ }
+ return to_write;
+}
+
+struct ffa_value ffa_console_log(const char *message, size_t char_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_CONSOLE_LOG_SMC64,
+ .arg1 = char_count,
+ };
+ size_t written = 0;
+
+ assert(char_count <= sizeof(uint64_t) * 6);
+
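+	/* Pack the message into arguments x2-x7, eight bytes per register. */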
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg2);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg3);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg4);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg5);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg6);
+ char_to_arg_helper(&message[written], char_count - written,
+ &args.arg7);
+
+ return ffa_service_call(&args);
+}
diff --git a/spm/scmi/include/aarch64/arch.h b/spm/scmi/include/aarch64/arch.h
new file mode 100644
index 0000000..739cd26
--- /dev/null
+++ b/spm/scmi/include/aarch64/arch.h
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(0x18)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_VAR_MASK U(0xf0)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_REV_MASK U(0xf)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (ULL(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK ULL(0xff)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF3_SHIFT U(32)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFFLVL0 ULL(0x0)
+#define MPIDR_AFFLVL1 ULL(0x1)
+#define MPIDR_AFFLVL2 ULL(0x2)
+#define MPIDR_AFFLVL3 ULL(0x3)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ * TODO: Only the first 3 affinity levels are supported for now.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+#define MPID_MASK (MPIDR_MT_MASK | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
+
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_SGI1R S3_0_C12_C11_5
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+
+#define ICV_CTRL_EL1 S3_0_C12_C12_4
+#define ICV_IAR1_EL1 S3_0_C12_C12_0
+#define ICV_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICV_EOIR1_EL1 S3_0_C12_C12_1
+#define ICV_PMR_EL1 S3_0_C4_C6_0
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW U(0x0)
+#define DCCISW U(0x1)
+#define DCCSW U(0x2)
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_AA64PFR0_AMU_V1 U(0x1)
+#define ID_AA64PFR0_AMU_V1P1 U(0x2)
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_WIDTH U(4)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_WIDTH U(4)
+#define ID_AA64PFR0_CSV2_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_CSV2_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
+#define ID_AA64PFR0_RAS_MASK ULL(0xf)
+#define ID_AA64PFR0_RAS_SHIFT U(28)
+#define ID_AA64PFR0_RAS_WIDTH U(4)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_RAS_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_RASV1P1_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+#define ID_AA64PFR0_GIC_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_GICV3_GICV4_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_GICV4_1_SUPPORTED ULL(0x2)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_SPE_NOT_SUPPORTED U(0)
+#define ID_AA64DFR0_SPE U(1)
+#define ID_AA64DFR0_SPE_V1P1 U(2)
+#define ID_AA64DFR0_SPE_V1P2 U(3)
+#define ID_AA64DFR0_SPE_V1P3 U(4)
+#define ID_AA64DFR0_SPE_V1P4 U(5)
+
+/* ID_AA64DFR0_EL1.DEBUG definitions */
+#define ID_AA64DFR0_DEBUG_SHIFT U(0)
+#define ID_AA64DFR0_DEBUG_LENGTH U(4)
+#define ID_AA64DFR0_DEBUG_MASK ULL(0xf)
+#define ID_AA64DFR0_DEBUG_BITS (ID_AA64DFR0_DEBUG_MASK << \
+ ID_AA64DFR0_DEBUG_SHIFT)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_SUPPORTED U(6)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_VHE_SUPPORTED U(7)
+#define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED U(8)
+#define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED U(9)
+
+/* ID_AA64DFR0_EL1.HPMN0 definitions */
+#define ID_AA64DFR0_HPMN0_SHIFT U(60)
+#define ID_AA64DFR0_HPMN0_MASK ULL(0xf)
+#define ID_AA64DFR0_HPMN0_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.BRBE definitions */
+#define ID_AA64DFR0_BRBE_SHIFT U(52)
+#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
+#define ID_AA64DFR0_BRBE_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceBuffer definitions */
+#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
+#define ID_AA64DFR0_TRACEBUFFER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEBUFFER_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceFilt definitions */
+#define ID_AA64DFR0_TRACEFILT_SHIFT U(40)
+#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
+#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+
+/* ID_AA64DFR0_EL1.PMUVer definitions */
+#define ID_AA64DFR0_PMUVER_SHIFT U(8)
+#define ID_AA64DFR0_PMUVER_MASK ULL(0xf)
+#define ID_AA64DFR0_PMUVER_NOT_SUPPORTED ULL(0)
+
+/* ID_AA64DFR0_EL1.TraceVer definitions */
+#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
+#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEVER_SUPPORTED ULL(1)
+
+#define EL_IMPL_NONE ULL(0)
+#define EL_IMPL_A64ONLY ULL(1)
+#define EL_IMPL_A64_A32 ULL(2)
+
+/* ID_AA64ISAR0_EL1 definitions */
+#define ID_AA64ISAR0_EL1 S3_0_C0_C6_0
+#define ID_AA64ISAR0_TLB_MASK ULL(0xf)
+#define ID_AA64ISAR0_TLB_SHIFT U(56)
+#define ID_AA64ISAR0_TLB_WIDTH U(4)
+#define ID_AA64ISAR0_TLBIRANGE_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR0_TLB_NOT_SUPPORTED ULL(0)
+
+/* ID_AA64ISAR1_EL1 definitions */
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_WIDTH U(4)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_WIDTH U(4)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_WIDTH U(4)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_WIDTH U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_SHIFT U(40)
+#define ID_AA64ISAR1_SPECRES_WIDTH U(4)
+#define ID_AA64ISAR1_SPECRES_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_SPECRES_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB_MASK ULL(0xf)
+#define ID_AA64ISAR1_DPB_SHIFT U(0)
+#define ID_AA64ISAR1_DPB_WIDTH U(4)
+#define ID_AA64ISAR1_DPB_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_DPB_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB2_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_MASK ULL(0xf)
+#define ID_AA64ISAR1_LS64_SHIFT U(60)
+#define ID_AA64ISAR1_LS64_WIDTH U(4)
+#define ID_AA64ISAR1_LS64_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_LS64_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_LS64_V_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_ACCDATA_SUPPORTED ULL(0x3)
+
+/* ID_AA64ISAR2_EL1 definitions */
+#define ID_AA64ISAR2_EL1 S3_0_C0_C6_2
+#define ID_AA64ISAR2_WFXT_MASK ULL(0xf)
+#define ID_AA64ISAR2_WFXT_SHIFT U(0x0)
+#define ID_AA64ISAR2_WFXT_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR2_GPA3_SHIFT U(8)
+#define ID_AA64ISAR2_GPA3_MASK ULL(0xf)
+#define ID_AA64ISAR2_APA3_SHIFT U(12)
+#define ID_AA64ISAR2_APA3_MASK ULL(0xf)
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK ULL(0xf)
+
+#define PARANGE_0000 U(32)
+#define PARANGE_0001 U(36)
+#define PARANGE_0010 U(40)
+#define PARANGE_0011 U(42)
+#define PARANGE_0100 U(44)
+#define PARANGE_0101 U(48)
+#define PARANGE_0110 U(52)
+
+#define ID_AA64MMFR0_EL1_ECV_SHIFT U(60)
+#define ID_AA64MMFR0_EL1_ECV_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_ECV_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_ECV_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_ECV_SELF_SYNCH ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_FGT_SHIFT U(56)
+#define ID_AA64MMFR0_EL1_FGT_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_FGT_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_FGT_SUPPORTED ULL(0x1)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_52B_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT U(40)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_52B_SUPPORTED ULL(0x3)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT U(36)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SUPPORTED ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_52B_SUPPORTED ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT U(32)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_52B_SUPPORTED ULL(0x3)
+
+/* ID_AA64MMFR1_EL1 definitions */
+#define ID_AA64MMFR1_EL1_PAN_SHIFT U(20)
+#define ID_AA64MMFR1_EL1_PAN_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_PAN_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_PAN_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_PAN2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR1_EL1_PAN3_SUPPORTED ULL(0x3)
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_AFP_SHIFT U(44)
+#define ID_AA64MMFR1_EL1_AFP_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_AFP_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_LO_SHIFT U(16)
+#define ID_AA64MMFR1_EL1_LO_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_LO_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_LOR_SUPPORTED ULL(0x1)
+
+/* ID_AA64MMFR2_EL1 definitions */
+#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+
+#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
+#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
+
+#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
+#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
+
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
+
+#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+
+#define ID_AA64PFR1_EL1_BT_SHIFT U(0)
+#define ID_AA64PFR1_EL1_BT_MASK ULL(0xf)
+
+#define BTI_IMPLEMENTED ULL(1) /* The BTI mechanism is implemented */
+
+#define ID_AA64PFR1_EL1_MTE_SHIFT U(8)
+#define ID_AA64PFR1_EL1_MTE_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT U(28)
+#define ID_AA64PFR1_EL1_RNDR_TRAP_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_RNG_TRAP_NOT_SUPPORTED ULL(0x0)
+
+#define ID_AA64PFR1_CSV2_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_CSV2_FRAC_SHIFT U(32)
+#define ID_AA64PFR1_CSV2_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_CSV2_1P1_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_CSV2_1P2_SUPPORTED ULL(0x2)
+
+#define MTE_UNIMPLEMENTED ULL(0)
+#define MTE_IMPLEMENTED_EL0 ULL(1) /* MTE is only implemented at EL0 */
+#define MTE_IMPLEMENTED_ELX ULL(2) /* MTE is implemented at all ELs */
+
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_SME_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR1_EL1_SME_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_SME2_SUPPORTED ULL(0x2)
+
+#define ID_AA64PFR1_RAS_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_RAS_FRAC_SHIFT U(12)
+#define ID_AA64PFR1_RAS_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_RASV1P1_SUPPORTED ULL(0x1)
+
+/* ID_PFR1_EL1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+ & ID_PFR1_VIRTEXT_MASK)
+
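+/*
+ * Illustrative use (a sketch, not part of this header): the presence of the
+ * AArch32 Virtualization Extensions can be probed with
+ *
+ *   GET_VIRT_EXT(read_id_pfr1_el1()) != 0U
+ *
+ * where the read_id_pfr1_el1() accessor name is an assumption.
+ */
+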
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
+#define SCTLR_AARCH32_EL1_RES1 \
+ ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+ (U(1) << 4) | (U(1) << 3))
+
+#define SCTLR_EL3_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT (ULL(1) << 0)
+#define SCTLR_A_BIT (ULL(1) << 1)
+#define SCTLR_C_BIT (ULL(1) << 2)
+#define SCTLR_SA_BIT (ULL(1) << 3)
+#define SCTLR_SA0_BIT (ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT (ULL(1) << 5)
+#define SCTLR_ITD_BIT (ULL(1) << 7)
+#define SCTLR_SED_BIT (ULL(1) << 8)
+#define SCTLR_UMA_BIT (ULL(1) << 9)
+#define SCTLR_I_BIT (ULL(1) << 12)
+#define SCTLR_EnDB_BIT (ULL(1) << 13)
+#define SCTLR_DZE_BIT (ULL(1) << 14)
+#define SCTLR_UCT_BIT (ULL(1) << 15)
+#define SCTLR_NTWI_BIT (ULL(1) << 16)
+#define SCTLR_NTWE_BIT (ULL(1) << 18)
+#define SCTLR_WXN_BIT (ULL(1) << 19)
+#define SCTLR_UWXN_BIT (ULL(1) << 20)
+#define SCTLR_IESB_BIT (ULL(1) << 21)
+#define SCTLR_SPAN_BIT (ULL(1) << 23)
+#define SCTLR_E0E_BIT (ULL(1) << 24)
+#define SCTLR_EE_BIT (ULL(1) << 25)
+#define SCTLR_UCI_BIT (ULL(1) << 26)
+#define SCTLR_EnDA_BIT (ULL(1) << 27)
+#define SCTLR_EnIB_BIT (ULL(1) << 30)
+#define SCTLR_EnIA_BIT (ULL(1) << 31)
+#define SCTLR_DSSBS_BIT (ULL(1) << 44)
+#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+
+/* CPACR_EL1 definitions */
+#define CPACR_EL1_FPEN(x) ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0 U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+
+#define CPACR_EL1_ZEN(x) ((x) << 16)
+#define CPACR_EL1_ZEN_TRAP_EL0 U(0x1)
+#define CPACR_EL1_ZEN_TRAP_ALL U(0x2)
+#define CPACR_EL1_ZEN_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_AMVOFFEN_BIT (UL(1) << 35)
+#define SCR_ATA_BIT (U(1) << 26)
+#define SCR_FIEN_BIT (U(1) << 21)
+#define SCR_API_BIT (U(1) << 17)
+#define SCR_APK_BIT (U(1) << 16)
+#define SCR_TWE_BIT (U(1) << 13)
+#define SCR_TWI_BIT (U(1) << 12)
+#define SCR_ST_BIT (U(1) << 11)
+#define SCR_RW_BIT (U(1) << 10)
+#define SCR_SIF_BIT (U(1) << 9)
+#define SCR_HCE_BIT (U(1) << 8)
+#define SCR_SMD_BIT (U(1) << 7)
+#define SCR_EA_BIT (U(1) << 3)
+#define SCR_FIQ_BIT (U(1) << 2)
+#define SCR_IRQ_BIT (U(1) << 1)
+#define SCR_NS_BIT (U(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_RESET_VAL SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_SPD32(x) ((x) << 14)
+#define MDCR_SPD32_LEGACY ULL(0x0)
+#define MDCR_SPD32_DISABLE ULL(0x2)
+#define MDCR_SPD32_ENABLE ULL(0x3)
+#define MDCR_SDD_BIT (ULL(1) << 16)
+#define MDCR_NSPB(x) ((x) << 12)
+#define MDCR_NSPB_EL1 ULL(0x3)
+#define MDCR_TDOSA_BIT (ULL(1) << 10)
+#define MDCR_TDA_BIT (ULL(1) << 9)
+#define MDCR_TPM_BIT (ULL(1) << 6)
+#define MDCR_SCCD_BIT (ULL(1) << 23)
+#define MDCR_EL3_RESET_VAL ULL(0x0)
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_TPMS (U(1) << 14)
+#define MDCR_EL2_E2PB(x) ((x) << 12)
+#define MDCR_EL2_E2PB_EL1 U(0x3)
+#define MDCR_EL2_TDRA_BIT (U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT (U(1) << 10)
+#define MDCR_EL2_TDA_BIT (U(1) << 9)
+#define MDCR_EL2_TDE_BIT (U(1) << 8)
+#define MDCR_EL2_HPME_BIT (U(1) << 7)
+#define MDCR_EL2_TPM_BIT (U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_HPMN_SHIFT U(0)
+#define MDCR_EL2_HPMN_MASK ULL(0x1f)
+#define MDCR_EL2_RESET_VAL U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL U(0x0)
+#define HSTR_EL2_T_MASK U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT (U(1) << 0)
+#define CNTHP_CTL_RESET_VAL U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL ULL(0x0)
+#define VTTBR_VMID_MASK ULL(0xff)
+#define VTTBR_VMID_SHIFT U(48)
+#define VTTBR_BADDR_MASK ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT U(0)
+
+/* HCR definitions */
+#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
+#define HCR_API_BIT (ULL(1) << 41)
+#define HCR_APK_BIT (ULL(1) << 40)
+#define HCR_E2H_BIT (ULL(1) << 34)
+#define HCR_TGE_BIT (ULL(1) << 27)
+#define HCR_RW_SHIFT U(31)
+#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
+#define HCR_AMO_BIT (ULL(1) << 5)
+#define HCR_IMO_BIT (ULL(1) << 4)
+#define HCR_FMO_BIT (ULL(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT U(8)
+#define ISR_I_SHIFT U(7)
+#define ISR_F_SHIFT U(6)
+
+/* CNTHCTL_EL2 definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define EVNTEN_BIT (U(1) << 2)
+#define EL1PCEN_BIT (U(1) << 1)
+#define EL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT (U(1) << 9)
+#define EL0VTEN_BIT (U(1) << 8)
+#define EL0PCTEN_BIT (U(1) << 0)
+#define EL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* CPTR_EL3 definitions */
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_BIT (U(1) << 30)
+#define TTA_BIT (U(1) << 20)
+#define ESM_BIT (U(1) << 12)
+#define TFP_BIT (U(1) << 10)
+#define CPTR_EZ_BIT (U(1) << 8)
+#define CPTR_EL3_RESET_VAL U(0x0)
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1 ((ULL(3) << 12) | (ULL(1) << 9) | (ULL(0xff)))
+#define CPTR_EL2_TCPAC_BIT (ULL(1) << 31)
+#define CPTR_EL2_TAM_BIT (ULL(1) << 30)
+#define CPTR_EL2_SMEN_MASK ULL(0x3)
+#define CPTR_EL2_SMEN_SHIFT U(24)
+#define CPTR_EL2_TTA_BIT (ULL(1) << 20)
+#define CPTR_EL2_TSM_BIT (ULL(1) << 12)
+#define CPTR_EL2_TFP_BIT (ULL(1) << 10)
+#define CPTR_EL2_TZ_BIT (ULL(1) << 8)
+#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT (U(1) << 0)
+#define DAIF_IRQ_BIT (U(1) << 1)
+#define DAIF_ABT_BIT (U(1) << 2)
+#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_DAIF_SHIFT U(6)
+#define SPSR_DAIF_MASK U(0xf)
+
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0x0)
+#define SPSR_E_BIG U(0x1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0x0)
+#define SPSR_T_THUMB U(0x1)
+
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT (U(1) << 1)
+#define RMR_EL3_AA64_BIT (U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE U(0xFFFF0000)
+
+/*
+ * TCR definitions
+ */
+#define TCR_EL3_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
+#define TCR_EL3_PS_SHIFT U(16)
+
+#define TCR_TxSZ_MIN ULL(16)
+#define TCR_TxSZ_MAX ULL(39)
+#define TCR_TxSZ_MAX_TTST ULL(48)
+
+#define TCR_T0SZ_SHIFT U(0)
+#define TCR_T1SZ_SHIFT U(16)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB ULL(0x0)
+#define TCR_PS_BITS_64GB ULL(0x1)
+#define TCR_PS_BITS_1TB ULL(0x2)
+#define TCR_PS_BITS_4TB ULL(0x3)
+#define TCR_PS_BITS_16TB ULL(0x4)
+#define TCR_PS_BITS_256TB ULL(0x5)
+
+#define ADDR_MASK_48_TO_63 ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47 ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43 ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41 ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39 ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35 ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_RGN1_INNER_NC (ULL(0x0) << 24)
+#define TCR_RGN1_INNER_WBA (ULL(0x1) << 24)
+#define TCR_RGN1_INNER_WT (ULL(0x2) << 24)
+#define TCR_RGN1_INNER_WBNA (ULL(0x3) << 24)
+
+#define TCR_RGN1_OUTER_NC (ULL(0x0) << 26)
+#define TCR_RGN1_OUTER_WBA (ULL(0x1) << 26)
+#define TCR_RGN1_OUTER_WT (ULL(0x2) << 26)
+#define TCR_RGN1_OUTER_WBNA (ULL(0x3) << 26)
+
+#define TCR_SH1_NON_SHAREABLE (ULL(0x0) << 28)
+#define TCR_SH1_OUTER_SHAREABLE (ULL(0x2) << 28)
+#define TCR_SH1_INNER_SHAREABLE (ULL(0x3) << 28)
+
+#define TCR_TG0_SHIFT U(14)
+#define TCR_TG0_MASK ULL(3)
+#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT U(30)
+#define TCR_TG1_MASK ULL(3)
+#define TCR_TG1_16K (ULL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (ULL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (ULL(3) << TCR_TG1_SHIFT)
+
+#define TCR_EPD0_BIT (ULL(1) << 7)
+#define TCR_EPD1_BIT (ULL(1) << 23)
+
+#define MODE_SP_SHIFT U(0x0)
+#define MODE_SP_MASK U(0x1)
+#define MODE_SP_EL0 U(0x0)
+#define MODE_SP_ELX U(0x1)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_64 U(0x0)
+#define MODE_RW_32 U(0x1)
+
+#define MODE_EL_SHIFT U(0x2)
+#define MODE_EL_MASK U(0x3)
+#define MODE_EL3 U(0x3)
+#define MODE_EL2 U(0x2)
+#define MODE_EL1 U(0x1)
+#define MODE_EL0 U(0x0)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0xf)
+#define MODE32_usr U(0x0)
+#define MODE32_fiq U(0x1)
+#define MODE32_irq U(0x2)
+#define MODE32_svc U(0x3)
+#define MODE32_mon U(0x6)
+#define MODE32_abt U(0x7)
+#define MODE32_hyp U(0xa)
+#define MODE32_und U(0xb)
+#define MODE32_sys U(0xf)
+
+#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif) \
+ ((MODE_RW_64 << MODE_RW_SHIFT) | \
+ (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) | \
+ (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) | \
+ (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ ((MODE_RW_32 << MODE_RW_SHIFT) | \
+ (((mode) & MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
+
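+/*
+ * Illustrative example (a sketch, not a definition used by this header):
+ * composing an SPSR for an exception return to AArch64 EL1, using SP_EL1,
+ * with all DAIF exceptions masked:
+ *
+ *   SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)
+ *
+ * Applying GET_EL() to the result recovers MODE_EL1.
+ */
+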
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT U(24)
+#define CTR_CWG_MASK U(0xf)
+#define CTR_ERG_SHIFT U(20)
+#define CTR_ERG_MASK U(0xf)
+#define CTR_DMINLINE_SHIFT U(16)
+#define CTR_DMINLINE_MASK U(0xf)
+#define CTR_L1IP_SHIFT U(14)
+#define CTR_L1IP_MASK U(0x3)
+#define CTR_IMINLINE_SHIFT U(0)
+#define CTR_IMINLINE_MASK U(0xf)
+
+#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+
+/*
+ * FPCR definitions
+ */
+#define FPCR_FIZ_BIT (ULL(1) << 0)
+#define FPCR_AH_BIT (ULL(1) << 1)
+#define FPCR_NEP_BIT (ULL(1) << 2)
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT U(0)
+#define CNTP_CTL_IMASK_SHIFT U(1)
+#define CNTP_CTL_ISTATUS_SHIFT U(2)
+
+#define CNTP_CTL_ENABLE_MASK U(1)
+#define CNTP_CTL_IMASK_MASK U(1)
+#define CNTP_CTL_ISTATUS_MASK U(1)
+
+/* Exception Syndrome Register (ESR) bit definitions */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define ESR_ISS_SHIFT U(0x0)
+#define ESR_ISS_MASK U(0x1ffffff)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR U(0x5)
+#define EC_AARCH32_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_AARCH32_CP10_MRC U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_AARCH32_SVC U(0x11)
+#define EC_AARCH32_HVC U(0x12)
+#define EC_AARCH32_SMC U(0x13)
+#define EC_AARCH64_SVC U(0x15)
+#define EC_AARCH64_HVC U(0x16)
+#define EC_AARCH64_SMC U(0x17)
+#define EC_AARCH64_SYS U(0x18)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_AARCH32_FP U(0x28)
+#define EC_AARCH64_FP U(0x2c)
+#define EC_SERROR U(0x2f)
+/* Data Fault Status code, not all error codes listed */
+#define ISS_DFSC_MASK U(0x3f)
+#define DFSC_EXT_DABORT U(0x10)
+#define DFSC_GPF_DABORT U(0x28)
+/* ISS encoding for an exception from HVC or SVC instruction execution */
+#define ISS_HVC_SMC_IMM16_MASK U(0xffff)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT U(9)
+
+#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+#define ISS_BITS(x) (((x) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
+
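+/*
+ * Illustrative decode (a sketch; the read_esr_el1() accessor name is an
+ * assumption, not defined here):
+ *
+ *   uint64_t esr = read_esr_el1();
+ *   if ((EC_BITS(esr) == EC_DABORT_CUR_EL) &&
+ *       ((ISS_BITS(esr) & ISS_DFSC_MASK) == DFSC_EXT_DABORT)) {
+ *       // synchronous external data abort taken from the current EL
+ *   }
+ */
+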
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT U(0x1)
+#define RMR_WARM_RESET_CPU (U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(12)
+#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
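+/*
+ * Example (illustrative): TLBI_ADDR(0xFFFF0000) evaluates to 0xFFFF0, the
+ * page-number form expected by address-based TLBI operations.
+ */
+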
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ U(0x0)
+#define CNTNSAR U(0x4)
+#define CNTNSAR_NS_SHIFT(x) (x)
+
+#define CNTACR_BASE(x) (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT U(0x0)
+#define CNTACR_RVCT_SHIFT U(0x1)
+#define CNTACR_RFRQ_SHIFT U(0x2)
+#define CNTACR_RVOFF_SHIFT U(0x3)
+#define CNTACR_RWVT_SHIFT U(0x4)
+#define CNTACR_RWPT_SHIFT U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL U(0x2c)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL U(0x0)
+#define PMCR_EL0_N_SHIFT U(11)
+#define PMCR_EL0_N_MASK U(0x1f)
+#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT (U(1) << 6)
+#define PMCR_EL0_DP_BIT (U(1) << 5)
+#define PMCR_EL0_X_BIT (U(1) << 4)
+#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
+#define PMCR_EL0_E_BIT (U(1) << 0)
+
+/* PMCNTENSET_EL0 definitions */
+#define PMCNTENSET_EL0_C_BIT (U(1) << 31)
+#define PMCNTENSET_EL0_P_BIT(x) (U(1) << x)
+
+/* PMEVTYPER<n>_EL0 definitions */
+#define PMEVTYPER_EL0_P_BIT (U(1) << 31)
+#define PMEVTYPER_EL0_U_BIT (U(1) << 30)
+#define PMEVTYPER_EL0_NSK_BIT (U(1) << 29)
+#define PMEVTYPER_EL0_NSU_BIT (U(1) << 28)
+#define PMEVTYPER_EL0_NSH_BIT (U(1) << 27)
+#define PMEVTYPER_EL0_M_BIT (U(1) << 26)
+#define PMEVTYPER_EL0_MT_BIT (U(1) << 25)
+#define PMEVTYPER_EL0_SH_BIT (U(1) << 24)
+#define PMEVTYPER_EL0_T_BIT (U(1) << 23)
+#define PMEVTYPER_EL0_RLK_BIT (U(1) << 22)
+#define PMEVTYPER_EL0_RLU_BIT (U(1) << 21)
+#define PMEVTYPER_EL0_RLH_BIT (U(1) << 20)
+#define PMEVTYPER_EL0_EVTCOUNT_BITS U(0x0000FFFF)
+
+/* PMCCFILTR_EL0 definitions */
+#define PMCCFILTR_EL0_P_BIT (U(1) << 31)
+#define PMCCFILTR_EL0_U_BIT (U(1) << 30)
+#define PMCCFILTR_EL0_NSK_BIT (U(1) << 29)
+#define PMCCFILTR_EL0_NSH_BIT (U(1) << 27)
+#define PMCCFILTR_EL0_M_BIT (U(1) << 26)
+#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
+#define PMCCFILTR_EL0_T_BIT (U(1) << 23)
+#define PMCCFILTR_EL0_RLK_BIT (U(1) << 22)
+#define PMCCFILTR_EL0_RLU_BIT (U(1) << 21)
+#define PMCCFILTR_EL0_RLH_BIT (U(1) << 20)
+
+/* PMSELR_EL0 definitions */
+#define PMSELR_EL0_SEL_SHIFT U(0)
+#define PMSELR_EL0_SEL_MASK U(0x1f)
+
+/* PMU event counter ID definitions */
+#define PMU_EV_PC_WRITE_RETIRED U(0x000C)
+
+/*******************************************************************************
+ * Definitions for system register interface to SVE
+ ******************************************************************************/
+#define ID_AA64ZFR0_EL1 S3_0_C0_C4_4
+
+/* ZCR_EL2 definitions */
+#define ZCR_EL2 S3_4_C1_C2_0
+#define ZCR_EL2_SVE_VL_SHIFT UL(0)
+#define ZCR_EL2_SVE_VL_WIDTH UL(4)
+
+/* ZCR_EL1 definitions */
+#define ZCR_EL1 S3_0_C1_C2_0
+#define ZCR_EL1_SVE_VL_SHIFT UL(0)
+#define ZCR_EL1_SVE_VL_WIDTH UL(4)
+
+/*******************************************************************************
+ * Definitions for system register interface to SME
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SVCR S3_3_C4_C2_2
+#define TPIDR2_EL0 S3_3_C13_C0_5
+#define SMCR_EL2 S3_4_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SVCR definitions */
+#define SVCR_ZA_BIT (U(1) << 1)
+#define SVCR_SM_BIT (U(1) << 0)
+
+/* SMPRI_EL1 definitions */
+#define SMPRI_EL1_PRIORITY_SHIFT U(0)
+#define SMPRI_EL1_PRIORITY_MASK U(0xf)
+
+/* SMPRIMAP_EL2 definitions */
+/* Register is composed of 16 priority map fields of 4 bits numbered 0-15. */
+#define SMPRIMAP_EL2_MAP_SHIFT(pri) U((pri) * 4)
+#define SMPRIMAP_EL2_MAP_MASK U(0xf)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_MASK U(0x1ff)
+#define SMCR_ELX_EZT0_BIT (U(1) << 30)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE ULL(0x0)
+#define MAIR_DEV_nGnRE ULL(0x4)
+#define MAIR_DEV_nGRE ULL(0x8)
+#define MAIR_DEV_GRE ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA ULL(0x1)
+#define MAIR_NORM_WT_TR_RA ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA ULL(0x3)
+#define MAIR_NORM_NC ULL(0x4)
+#define MAIR_NORM_WB_TR_WA ULL(0x5)
+#define MAIR_NORM_WB_TR_RA ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
+
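+/*
+ * Example (illustrative): a MAIR attribute encoding for Normal memory,
+ * inner and outer Write-Back, Non-Transient, Read/Write-Allocate:
+ *
+ *   MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+ *
+ * which evaluates to 0xff.
+ */
+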
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMSCR_EL1 S3_0_C9_C9_0
+#define PMSNEVFR_EL1 S3_0_C9_C9_1
+#define PMSICR_EL1 S3_0_C9_C9_2
+#define PMSIRR_EL1 S3_0_C9_C9_3
+#define PMSFCR_EL1 S3_0_C9_C9_4
+#define PMSEVFR_EL1 S3_0_C9_C9_5
+#define PMSLATFR_EL1 S3_0_C9_C9_6
+#define PMSIDR_EL1 S3_0_C9_C9_7
+#define PMBLIMITR_EL1 S3_0_C9_C10_0
+#define PMBPTR_EL1 S3_0_C9_C10_1
+#define PMBSR_EL1 S3_0_C9_C10_3
+#define PMSCR_EL2 S3_4_C9_C9_0
+
+/*******************************************************************************
+ * Definitions for system register interface to MPAM
+ ******************************************************************************/
+#define MPAMIDR_EL1 S3_0_C10_C4_4
+#define MPAM2_EL2 S3_4_C10_C5_0
+#define MPAMHCR_EL2 S3_4_C10_C4_0
+#define MPAM3_EL3 S3_6_C10_C5_0
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR_EL0 S3_3_C13_C2_0
+#define AMCFGR_EL0 S3_3_C13_C2_1
+#define AMCGCR_EL0 S3_3_C13_C2_2
+#define AMUSERENR_EL0 S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0 S3_3_C13_C2_4
+#define AMCNTENSET0_EL0 S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0 S3_3_C13_C3_0
+#define AMCNTENSET1_EL0 S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0 S3_3_C13_C4_0
+#define AMEVCNTR01_EL0 S3_3_C13_C4_1
+#define AMEVCNTR02_EL0 S3_3_C13_C4_2
+#define AMEVCNTR03_EL0 S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0 S3_3_C13_C6_0
+#define AMEVTYPER01_EL0 S3_3_C13_C6_1
+#define AMEVTYPER02_EL0 S3_3_C13_C6_2
+#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCFGR_EL0 definitions */
+#define AMCFGR_EL0_NCG_SHIFT U(28)
+#define AMCFGR_EL0_NCG_MASK U(0xf)
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
+/* MPAM register definitions */
+#define MPAM3_EL3_MPAMEN_BIT (ULL(1) << 63)
+#define MPAMHCR_EL2_TRAP_MPAMIDR_EL1 (ULL(1) << 31)
+
+#define MPAM2_EL2_TRAPMPAM0EL1 (ULL(1) << 49)
+#define MPAM2_EL2_TRAPMPAM1EL1 (ULL(1) << 48)
+
+#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.6 enhancements
+ ******************************************************************************/
+
+/* Definition for register defining which virtual offsets are implemented. */
+#define AMCG1IDR_EL0 S3_3_C13_C2_6
+#define AMCG1IDR_CTR_MASK ULL(0xffff)
+#define AMCG1IDR_CTR_SHIFT U(0)
+#define AMCG1IDR_VOFF_MASK ULL(0xffff)
+#define AMCG1IDR_VOFF_SHIFT U(16)
+
+/* New bit added to AMCR_EL0 */
+#define AMCR_CG1RZ_BIT (ULL(0x1) << 17)
+
+/* Definitions for virtual offset registers for architected event counters. */
+/* AMEVCNTVOFF01_EL2 intentionally left undefined, as it does not exist. */
+#define AMEVCNTVOFF00_EL2 S3_4_C13_C8_0
+#define AMEVCNTVOFF02_EL2 S3_4_C13_C8_2
+#define AMEVCNTVOFF03_EL2 S3_4_C13_C8_3
+
+/* Definitions for virtual offset registers for auxiliary event counters. */
+#define AMEVCNTVOFF10_EL2 S3_4_C13_C10_0
+#define AMEVCNTVOFF11_EL2 S3_4_C13_C10_1
+#define AMEVCNTVOFF12_EL2 S3_4_C13_C10_2
+#define AMEVCNTVOFF13_EL2 S3_4_C13_C10_3
+#define AMEVCNTVOFF14_EL2 S3_4_C13_C10_4
+#define AMEVCNTVOFF15_EL2 S3_4_C13_C10_5
+#define AMEVCNTVOFF16_EL2 S3_4_C13_C10_6
+#define AMEVCNTVOFF17_EL2 S3_4_C13_C10_7
+#define AMEVCNTVOFF18_EL2 S3_4_C13_C11_0
+#define AMEVCNTVOFF19_EL2 S3_4_C13_C11_1
+#define AMEVCNTVOFF1A_EL2 S3_4_C13_C11_2
+#define AMEVCNTVOFF1B_EL2 S3_4_C13_C11_3
+#define AMEVCNTVOFF1C_EL2 S3_4_C13_C11_4
+#define AMEVCNTVOFF1D_EL2 S3_4_C13_C11_5
+#define AMEVCNTVOFF1E_EL2 S3_4_C13_C11_6
+#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
+
+/*******************************************************************************
+ * RAS system registers
+ ******************************************************************************/
+#define DISR_EL1 S3_0_C12_C1_1
+#define DISR_A_BIT U(31)
+
+#define ERRIDR_EL1 S3_0_C5_C3_0
+#define ERRIDR_MASK U(0xffff)
+
+#define ERRSELR_EL1 S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1 S3_0_C5_C4_0
+#define ERXCTLR_EL1 S3_0_C5_C4_1
+#define ERXSTATUS_EL1 S3_0_C5_C4_2
+#define ERXADDR_EL1 S3_0_C5_C4_3
+#define ERXPFGF_EL1 S3_0_C5_C4_4
+#define ERXPFGCTL_EL1 S3_0_C5_C4_5
+#define ERXPFGCDN_EL1 S3_0_C5_C4_6
+#define ERXMISC0_EL1 S3_0_C5_C5_0
+#define ERXMISC1_EL1 S3_0_C5_C5_1
+
+#define ERXCTLR_ED_BIT (U(1) << 0)
+#define ERXCTLR_UE_BIT (U(1) << 4)
+
+#define ERXPFGCTL_UC_BIT (U(1) << 1)
+#define ERXPFGCTL_UEU_BIT (U(1) << 2)
+#define ERXPFGCTL_CDEN_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Armv8.1 Registers - Privileged Access Never Registers
+ ******************************************************************************/
+#define PAN S3_0_C4_C2_3
+#define PAN_BIT BIT(22)
+
+/*******************************************************************************
+ * Armv8.3 Pointer Authentication Registers
+ ******************************************************************************/
+#define APIAKeyLo_EL1 S3_0_C2_C1_0
+#define APIAKeyHi_EL1 S3_0_C2_C1_1
+#define APIBKeyLo_EL1 S3_0_C2_C1_2
+#define APIBKeyHi_EL1 S3_0_C2_C1_3
+#define APDAKeyLo_EL1 S3_0_C2_C2_0
+#define APDAKeyHi_EL1 S3_0_C2_C2_1
+#define APDBKeyLo_EL1 S3_0_C2_C2_2
+#define APDBKeyHi_EL1 S3_0_C2_C2_3
+#define APGAKeyLo_EL1 S3_0_C2_C3_0
+#define APGAKeyHi_EL1 S3_0_C2_C3_1
+
+/*******************************************************************************
+ * Armv8.4 Data Independent Timing Registers
+ ******************************************************************************/
+#define DIT S3_3_C4_C2_5
+#define DIT_BIT BIT(24)
+
+/*******************************************************************************
+ * Armv8.5 - new MSR encoding to directly access PSTATE.SSBS field
+ ******************************************************************************/
+#define SSBS S3_3_C4_C2_6
+
+/*******************************************************************************
+ * Armv8.5 - Memory Tagging Extension Registers
+ ******************************************************************************/
+#define TFSRE0_EL1 S3_0_C5_C6_1
+#define TFSR_EL1 S3_0_C5_C6_0
+#define RGSR_EL1 S3_0_C1_C0_5
+#define GCR_EL1 S3_0_C1_C0_6
+
+/*******************************************************************************
+ * Armv8.6 - Fine Grained Virtualization Traps Registers
+ ******************************************************************************/
+#define HFGRTR_EL2 S3_4_C1_C1_4
+#define HFGWTR_EL2 S3_4_C1_C1_5
+#define HFGITR_EL2 S3_4_C1_C1_6
+#define HDFGRTR_EL2 S3_4_C3_C1_4
+#define HDFGWTR_EL2 S3_4_C3_C1_5
+
+/*******************************************************************************
+ * Armv8.6 - Enhanced Counter Virtualization Registers
+ ******************************************************************************/
+#define CNTPOFF_EL2 S3_4_C14_C0_6
+
+/*******************************************************************************
+ * Armv9.0 - Trace Buffer Extension System Registers
+ ******************************************************************************/
+#define TRBLIMITR_EL1 S3_0_C9_C11_0
+#define TRBPTR_EL1 S3_0_C9_C11_1
+#define TRBBASER_EL1 S3_0_C9_C11_2
+#define TRBSR_EL1 S3_0_C9_C11_3
+#define TRBMAR_EL1 S3_0_C9_C11_4
+#define TRBTRG_EL1 S3_0_C9_C11_6
+#define TRBIDR_EL1 S3_0_C9_C11_7
+
+/*******************************************************************************
+ * FEAT_BRBE - Branch Record Buffer Extension System Registers
+ ******************************************************************************/
+
+#define BRBCR_EL1 S2_1_C9_C0_0
+#define BRBCR_EL2 S2_4_C9_C0_0
+#define BRBFCR_EL1 S2_1_C9_C0_1
+#define BRBTS_EL1 S2_1_C9_C0_2
+#define BRBINFINJ_EL1 S2_1_C9_C1_0
+#define BRBSRCINJ_EL1 S2_1_C9_C1_1
+#define BRBTGTINJ_EL1 S2_1_C9_C1_2
+#define BRBIDR0_EL1 S2_1_C9_C2_0
+
+/*******************************************************************************
+ * Armv8.4 - Trace Filter System Registers
+ ******************************************************************************/
+#define TRFCR_EL1 S3_0_C1_C2_1
+#define TRFCR_EL2 S3_4_C1_C2_1
+
+/*******************************************************************************
+ * Trace System Registers
+ ******************************************************************************/
+#define TRCAUXCTLR S2_1_C0_C6_0
+#define TRCRSR S2_1_C0_C10_0
+#define TRCCCCTLR S2_1_C0_C14_0
+#define TRCBBCTLR S2_1_C0_C15_0
+#define TRCEXTINSELR0 S2_1_C0_C8_4
+#define TRCEXTINSELR1 S2_1_C0_C9_4
+#define TRCEXTINSELR2 S2_1_C0_C10_4
+#define TRCEXTINSELR3 S2_1_C0_C11_4
+#define TRCCLAIMSET S2_1_C7_C8_6
+#define TRCCLAIMCLR S2_1_C7_C9_6
+#define TRCDEVARCH S2_1_C7_C15_6
+
+/*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_MSCEn_BIT (UL(1) << 11)
+#define HCRX_EL2_MCE2_BIT (UL(1) << 10)
+#define HCRX_EL2_CMOW_BIT (UL(1) << 9)
+#define HCRX_EL2_VFNMI_BIT (UL(1) << 8)
+#define HCRX_EL2_VINMI_BIT (UL(1) << 7)
+#define HCRX_EL2_TALLINT_BIT (UL(1) << 6)
+#define HCRX_EL2_SMPME_BIT (UL(1) << 5)
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+#define HCRX_EL2_INIT_VAL ULL(0x0)
+
+/*******************************************************************************
+ * PFR0_EL1 - Definitions for AArch32 Processor Feature Register 0
+ ******************************************************************************/
+#define ID_PFR0_EL1 S3_0_C0_C1_0
+#define ID_PFR0_EL1_RAS_MASK ULL(0xf)
+#define ID_PFR0_EL1_RAS_SHIFT U(28)
+#define ID_PFR0_EL1_RAS_WIDTH U(4)
+#define ID_PFR0_EL1_RAS_SUPPORTED ULL(0x1)
+#define ID_PFR0_EL1_RASV1P1_SUPPORTED ULL(0x2)
+
+/*******************************************************************************
+ * PFR2_EL1 - Definitions for AArch32 Processor Feature Register 2
+ ******************************************************************************/
+#define ID_PFR2_EL1 S3_0_C0_C3_4
+#define ID_PFR2_EL1_RAS_FRAC_MASK ULL(0xf)
+#define ID_PFR2_EL1_RAS_FRAC_SHIFT U(8)
+#define ID_PFR2_EL1_RAS_FRAC_WIDTH U(4)
+#define ID_PFR2_EL1_RASV1P1_SUPPORTED ULL(0x1)
+
+/*******************************************************************************
+ * FEAT_FGT - Definitions for Fine-Grained Trap registers
+ ******************************************************************************/
+#define HFGITR_EL2_INIT_VAL ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_BRBE_MASK ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_SPECRES_MASK ULL(0x7000000000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_MASK ULL(0x3fc00000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_TLBIOS_MASK ULL(0xf000000)
+#define HFGITR_EL2_FEAT_TLBIOS_MASK ULL(0xfc0000)
+#define HFGITR_EL2_FEAT_PAN2_MASK ULL(0x30000)
+#define HFGITR_EL2_FEAT_DPB2_MASK ULL(0x200)
+#define HFGITR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x78fc03f000fdff)
+
+#define HFGRTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGRTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGRTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGRTR_EL2_FEAT_RAS_MASK ULL(0x27f0000000000)
+#define HFGRTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGRTR_EL2_FEAT_GICV3_MASK ULL(0x800000000)
+#define HFGRTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGRTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGRTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGRTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f3f07fe0f)
+
+#define HFGWTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGWTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGWTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGWTR_EL2_FEAT_RAS_MASK ULL(0x23a0000000000)
+#define HFGWTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGWTR_EL2_FEAT_GICV3_MASK ULL(0x8000000000)
+#define HFGWTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGWTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGWTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGWTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f2903380b)
+
+#endif /* ARCH_H */
diff --git a/spm/scmi/include/cactus_test_cmds.h b/spm/scmi/include/cactus_test_cmds.h
new file mode 100644
index 0000000..7bd397e
--- /dev/null
+++ b/spm/scmi/include/cactus_test_cmds.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CACTUS_TEST_CMDS
+#define CACTUS_TEST_CMDS
+
+#include <ffa_helpers.h>
+#include <spm_common.h>
+
+/**
+ * Success and error return values to be sent over a msg response.
+ */
+#define CACTUS_SUCCESS U(0)
+#define CACTUS_ERROR U(-1)
+
+/**
+ * Error codes.
+ */
+#define CACTUS_ERROR_INVALID U(1)
+#define CACTUS_ERROR_TEST U(2)
+#define CACTUS_ERROR_FFA_CALL U(3)
+#define CACTUS_ERROR_UNHANDLED U(4)
+
+#define ECHO_VAL1 U(0xa0a0a0a0)
+#define ECHO_VAL2 U(0xb0b0b0b0)
+#define ECHO_VAL3 U(0xc0c0c0c0)
+
+/**
+ * Get command from struct ffa_value.
+ */
+static inline uint64_t cactus_get_cmd(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg3;
+}
+
+/**
+ * Template for commands to be sent to CACTUS partitions over direct
+ * message interfaces.
+ */
+static inline struct ffa_value cactus_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t cmd, uint64_t val0,
+ uint64_t val1, uint64_t val2, uint64_t val3)
+{
+ return ffa_msg_send_direct_req64(source, dest, cmd, val0, val1, val2,
+ val3);
+}
+
+/**
+ * Template for 32-bit responses to Cactus commands.
+ * 'cactus_send_response32' is the template for custom responses, for cases
+ * where more than one value needs to be propagated in the response to a command.
+ */
+static inline struct ffa_value cactus_send_response32(
+ ffa_id_t source, ffa_id_t dest, uint32_t resp, uint32_t val0,
+ uint32_t val1, uint32_t val2, uint32_t val3)
+{
+ return ffa_msg_send_direct_resp32(source, dest, resp, val0, val1,
+ val2, val3);
+}
+
+/**
+ * Template for 64-bit responses to Cactus commands.
+ * 'cactus_send_response' is the template for custom responses, for cases
+ * where more than one value needs to be propagated in the response to a command.
+ */
+static inline struct ffa_value cactus_send_response(
+ ffa_id_t source, ffa_id_t dest, uint32_t resp, uint64_t val0,
+ uint64_t val1, uint64_t val2, uint64_t val3)
+{
+ return ffa_msg_send_direct_resp64(source, dest, resp, val0, val1,
+ val2, val3);
+}
+
+/**
+ * For responses of one value only.
+ */
+static inline struct ffa_value cactus_response(
+ ffa_id_t source, ffa_id_t dest, uint32_t response)
+{
+ return cactus_send_response(source, dest, response, 0, 0, 0, 0);
+}
+
+static inline uint32_t cactus_get_response(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg3;
+}
+
+/**
+ * Success response for cases where the SP needs to propagate an extra value
+ * to conclude the test.
+ * If more arguments are needed, a custom response should be defined for the
+ * specific test.
+ */
+static inline struct ffa_value cactus_success_resp(
+ ffa_id_t source, ffa_id_t dest, uint64_t value)
+{
+ return cactus_send_response(source, dest, CACTUS_SUCCESS, value,
+ 0, 0, 0);
+}
+
+/**
+ * In case the test fails on the SP side, the 'error_code' should help specify
+ * the reason, which can be specific to the test or one of the generic codes
+ * defined in the error code list above.
+ */
+static inline struct ffa_value cactus_error_resp32(
+ ffa_id_t source, ffa_id_t dest, uint32_t error_code)
+{
+ return cactus_send_response32(source, dest, CACTUS_ERROR, error_code,
+ 0, 0, 0);
+}
+
+/**
+ * In case the test fails on the SP side, the 'error_code' should help specify
+ * the reason, which can be specific to the test or one of the generic codes
+ * defined in the error code list above.
+ */
+static inline struct ffa_value cactus_error_resp(
+ ffa_id_t source, ffa_id_t dest, uint32_t error_code)
+{
+ return cactus_send_response(source, dest, CACTUS_ERROR, error_code,
+ 0, 0, 0);
+}
+
+static inline uint32_t cactus_error_code(struct ffa_value ret)
+{
+ return (uint32_t) ret.arg4;
+}
+
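+/*
+ * Illustrative SP-side handler sketch (do_test(), own_id, sender_id and
+ * extra_val are assumed names, not part of this header):
+ *
+ *   if (do_test() == 0) {
+ *       return cactus_success_resp(own_id, sender_id, extra_val);
+ *   }
+ *   return cactus_error_resp(own_id, sender_id, CACTUS_ERROR_TEST);
+ */
+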
+/**
+ * With this test command the sender transmits a 64-bit value that it then
+ * expects to receive on the respective command response.
+ *
+ * The id is the hex representation of the string 'echo'.
+ */
+#define CACTUS_ECHO_CMD U(0x6563686f)
+
+static inline struct ffa_value cactus_echo_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t echo_val)
+{
+ return cactus_send_cmd(source, dest, CACTUS_ECHO_CMD, echo_val, 0, 0,
+ 0);
+}
+
+static inline uint64_t cactus_echo_get_val(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg4;
+}
+
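+/*
+ * Illustrative round trip (a sketch; own_id and sp_id are assumed FF-A
+ * endpoint IDs obtained elsewhere):
+ *
+ *   struct ffa_value ret = cactus_echo_send_cmd(own_id, sp_id, ECHO_VAL1);
+ *   if (cactus_get_response(ret) != CACTUS_SUCCESS ||
+ *       cactus_echo_get_val(ret) != ECHO_VAL1) {
+ *       // test failure
+ *   }
+ */
+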
+/**
+ * Command to request a cactus secure partition to send an echo command to
+ * another partition.
+ *
+ * The sender of this command expects to receive CACTUS_SUCCESS if the requested
+ * echo interaction happened successfully, or CACTUS_ERROR otherwise.
+ */
+#define CACTUS_REQ_ECHO_CMD (CACTUS_ECHO_CMD + 1)
+
+static inline struct ffa_value cactus_req_echo_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t echo_dest,
+ uint64_t echo_val)
+{
+ return cactus_send_cmd(source, dest, CACTUS_REQ_ECHO_CMD, echo_val,
+ echo_dest, 0, 0);
+}
+
+static inline ffa_id_t cactus_req_echo_get_echo_dest(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+/**
+ * Command to create a cyclic dependency between SPs, which could result in
+ * a deadlock. This aims at proving that such a scenario cannot happen.
+ * If the deadlock happens, the system will just hang.
+ * If the deadlock is prevented, the last partition to use the command will
+ * send response CACTUS_SUCCESS.
+ *
+ * The id is the hex representation of the string 'dead'.
+ */
+#define CACTUS_DEADLOCK_CMD U(0x64656164)
+
+static inline struct ffa_value cactus_deadlock_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t next_dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_DEADLOCK_CMD, next_dest, 0,
+ 0, 0);
+}
+
+static inline ffa_id_t cactus_deadlock_get_next_dest(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg4;
+}
+
+/**
+ * Command to request a sequence CACTUS_DEADLOCK_CMD between the partitions
+ * of specified IDs.
+ */
+#define CACTUS_REQ_DEADLOCK_CMD (CACTUS_DEADLOCK_CMD + 1)
+
+static inline struct ffa_value cactus_req_deadlock_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t next_dest1,
+ ffa_id_t next_dest2)
+{
+ return cactus_send_cmd(source, dest, CACTUS_REQ_DEADLOCK_CMD,
+ next_dest1, next_dest2, 0, 0);
+}
+
+/* To get next_dest1 use CACTUS_DEADLOCK_GET_NEXT_DEST */
+static inline ffa_id_t cactus_deadlock_get_next_dest2(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
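+/*
+ * Illustrative sequence (a sketch of the intended use): the test driver sends
+ * CACTUS_REQ_DEADLOCK_CMD to SP1 naming SP2 and SP3; SP1 then issues
+ * CACTUS_DEADLOCK_CMD to SP2 with next_dest = SP3, and SP3 attempts to close
+ * the cycle back towards SP1. If the circular dependency is prevented, the
+ * chain unwinds with CACTUS_SUCCESS.
+ */
+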
+/**
+ * Command to notify cactus of a memory management operation. The cmd value
+ * should be the memory management SMC function ID.
+ *
+ * The id is the hex representation of the string "mem"
+ */
+#define CACTUS_MEM_SEND_CMD U(0x6d656d)
+
+static inline struct ffa_value cactus_mem_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t mem_func,
+ ffa_memory_handle_t handle, ffa_memory_region_flags_t retrieve_flags,
+ bool non_secure, uint16_t word_to_write)
+{
+ /*
+	 * `non_secure` and `word_to_write` are packed into the same register,
+	 * as a single 32-bit value, to support AArch32 platforms (e.g. Juno).
+ */
+ uint32_t val3 = ((uint32_t)non_secure << 16) | word_to_write;
+ return cactus_send_cmd(source, dest, CACTUS_MEM_SEND_CMD, mem_func,
+ handle, retrieve_flags, val3);
+}
+
+static inline ffa_memory_handle_t cactus_mem_send_get_handle(
+ struct ffa_value ret)
+{
+ return (ffa_memory_handle_t)ret.arg5;
+}
+
+static inline ffa_memory_region_flags_t cactus_mem_send_get_retrv_flags(
+ struct ffa_value ret)
+{
+ return (ffa_memory_region_flags_t)ret.arg6;
+}
+
+static inline uint16_t cactus_mem_send_words_to_write(struct ffa_value ret)
+{
+ return (uint16_t)ret.arg7;
+}
+
+static inline bool cactus_mem_send_get_non_secure(struct ffa_value ret)
+{
+ return (bool)(ret.arg7 >> 16);
+}
+
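+/*
+ * Packing note (illustrative): with non_secure = true and
+ * word_to_write = 0xabcd, the packed value is 0x0001abcd; the receiver
+ * recovers the two halves with cactus_mem_send_get_non_secure() and
+ * cactus_mem_send_words_to_write() respectively.
+ */
+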
+/**
+ * Command to request a memory management operation. The 'mem_func' argument
+ * identifies the operation that is to be performed, and 'receiver' is the ID
+ * of the partition to receive the memory region.
+ *
+ * The command id is the hex representation of the string "memory".
+ */
+#define CACTUS_REQ_MEM_SEND_CMD U(0x6d656d6f7279)
+
+static inline struct ffa_value cactus_req_mem_send_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t mem_func,
+ ffa_id_t receiver, bool non_secure)
+{
+ return cactus_send_cmd(source, dest, CACTUS_REQ_MEM_SEND_CMD, mem_func,
+ receiver, non_secure, 0);
+}
+
+static inline uint32_t cactus_req_mem_send_get_mem_func(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+static inline ffa_id_t cactus_req_mem_send_get_receiver(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline bool cactus_req_mem_send_get_non_secure(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
+}
+
+/**
+ * Request to fill SIMD vectors with dummy values, with the purpose of
+ * checking the save/restore routine during context switches between the
+ * secure world and the normal world.
+ *
+ * The command id is the hex representation of the string "SIMD"
+ */
+#define CACTUS_REQ_SIMD_FILL_CMD U(0x53494d44)
+
+static inline struct ffa_value cactus_req_simd_fill_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_REQ_SIMD_FILL_CMD, 0, 0, 0,
+ 0);
+}
+
+/**
+ * Request to compare the FPU state (SIMD vectors, FPCR, FPSR) content
+ * with the previously filled template values, to check the save/restore
+ * routine during context switches between the secure and normal worlds.
+ */
+#define CACTUS_CMP_SIMD_VALUE_CMD (CACTUS_REQ_SIMD_FILL_CMD + 1)
+
+static inline struct ffa_value cactus_req_simd_compare_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_CMP_SIMD_VALUE_CMD, 0, 0, 0,
+ 0);
+}
+
+/**
+ * Command to request cactus to sleep for the given time in ms
+ *
+ * The command id is the hex representation of string "sleep"
+ */
+#define CACTUS_SLEEP_CMD U(0x736c656570)
+
+static inline struct ffa_value cactus_sleep_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t sleep_time)
+{
+ return cactus_send_cmd(source, dest, CACTUS_SLEEP_CMD, sleep_time, 0, 0,
+ 0);
+}
+
+/**
+ * Command to request cactus to forward sleep command for the given time in ms
+ *
+ * The sender of this command expects to receive CACTUS_SUCCESS if the requested
+ * sleep interaction happened successfully, or CACTUS_ERROR otherwise.
+ * Moreover, the sender can send a hint to the destination SP to expect that
+ * the forwarded sleep command could be preempted by a non-secure interrupt.
+ */
+#define CACTUS_FWD_SLEEP_CMD (CACTUS_SLEEP_CMD + 1)
+
+static inline struct ffa_value cactus_fwd_sleep_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t fwd_dest,
+ uint32_t sleep_time, bool hint_interrupted)
+{
+ return cactus_send_cmd(source, dest, CACTUS_FWD_SLEEP_CMD, sleep_time,
+ fwd_dest, hint_interrupted, 0);
+}
+
+static inline uint32_t cactus_get_sleep_time(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+static inline ffa_id_t cactus_get_fwd_sleep_dest(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline bool cactus_get_fwd_sleep_interrupted_hint(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
+}
+
+/**
+ * Command to request cactus to sleep for half the given time in ms, trigger
+ * trusted watchdog timer and then sleep again for another half the given time.
+ *
+ * The sender of this command expects to receive CACTUS_SUCCESS if the requested
+ * sequence completed successfully, or CACTUS_ERROR otherwise.
+ */
+#define CACTUS_SLEEP_TRIGGER_TWDOG_CMD (CACTUS_SLEEP_CMD + 2)
+
+static inline struct ffa_value cactus_sleep_trigger_wdog_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t sleep_time,
+ uint64_t wdog_time)
+{
+ return cactus_send_cmd(source, dest, CACTUS_SLEEP_TRIGGER_TWDOG_CMD, sleep_time,
+ wdog_time, 0, 0);
+}
+
+static inline uint32_t cactus_get_wdog_trigger_duration(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg5;
+}
+
+/**
+ * Command to request cactus to enable/disable an interrupt
+ *
+ * The command id is the hex representation of string "intr"
+ */
+#define CACTUS_INTERRUPT_CMD U(0x696e7472)
+
+static inline struct ffa_value cactus_interrupt_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t interrupt_id,
+ bool enable, uint32_t pin)
+{
+ return cactus_send_cmd(source, dest, CACTUS_INTERRUPT_CMD, interrupt_id,
+ enable, pin, 0);
+}
+
+static inline uint32_t cactus_get_interrupt_id(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+static inline bool cactus_get_interrupt_enable(struct ffa_value ret)
+{
+ return (bool)ret.arg5;
+}
+
+static inline enum interrupt_pin cactus_get_interrupt_pin(struct ffa_value ret)
+{
+ return (enum interrupt_pin)ret.arg6;
+}
+
+/**
+ * Request to initiate a DMA transaction by an upstream peripheral.
+ *
+ * The command id is the hex representation of the string "SMMU"
+ */
+#define CACTUS_DMA_SMMUv3_CMD U(0x534d4d55)
+
+static inline struct ffa_value cactus_send_dma_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_DMA_SMMUv3_CMD, 0, 0, 0,
+ 0);
+}
+
+/**
+ * Request SP to bind a notification to an FF-A endpoint. In case of error
+ * when using the FFA_NOTIFICATION_BIND interface, include the error code
+ * in the response to the command's request. The receiver and sender arguments
+ * are propagated through the command's arguments, to allow the test of
+ * erroneous uses of the FFA_NOTIFICATION_BIND interface.
+ *
+ * The command id is the hex representation of the string "bind".
+ */
+#define CACTUS_NOTIFICATION_BIND_CMD U(0x62696e64)
+
+static inline struct ffa_value cactus_notification_bind_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, ffa_notification_bitmap_t notifications, uint32_t flags)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_BIND_CMD,
+ receiver, sender, notifications, flags);
+}
+
+/**
+ * Request SP to unbind a notification. In case of error when using the
+ * FFA_NOTIFICATION_UNBIND interface, the test includes the error code in the
+ * response. The receiver and sender arguments are propagated through the
+ * command's arguments, to allow testing of erroneous uses of the
+ * FFA_NOTIFICATION_UNBIND interface.
+ *
+ * The command id is the hex representation of the string "unbind".
+ */
+#define CACTUS_NOTIFICATION_UNBIND_CMD U(0x756e62696e64)
+
+static inline struct ffa_value cactus_notification_unbind_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, ffa_notification_bitmap_t notifications)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_UNBIND_CMD,
+ receiver, sender, notifications, 0);
+}
+
+static inline ffa_id_t cactus_notification_get_receiver(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg4;
+}
+
+static inline ffa_id_t cactus_notification_get_sender(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline ffa_notification_bitmap_t cactus_notification_get_notifications(
+ struct ffa_value ret)
+{
+	return (ffa_notification_bitmap_t)ret.arg6;
+}
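+
+/*
+ * Illustrative bind/unbind round trip (IDs and bitmap are made up; assumes
+ * the cactus_get_response() helper defined earlier in this file):
+ *
+ *	ffa_notification_bitmap_t bitmap = 1ULL << 0;
+ *	struct ffa_value ret;
+ *
+ *	ret = cactus_notification_bind_send_cmd(HYP_ID, SP_ID(1), SP_ID(1),
+ *						HYP_ID, bitmap, 0);
+ *	... use the notification ...
+ *	ret = cactus_notification_unbind_send_cmd(HYP_ID, SP_ID(1), SP_ID(1),
+ *						  HYP_ID, bitmap);
+ */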
+
+/**
+ * Request SP to get notifications. The arguments to use in ffa_notification_get
+ * are propagated on the command to test erroneous uses of the interface.
+ * In a successful call to the interface, the SP's response payload should
+ * include all bitmaps returned by the SPMC.
+ *
+ * The command id is the hex representation of the string "getnot".
+ */
+#define CACTUS_NOTIFICATION_GET_CMD U(0x6765746e6f74)
+
+static inline struct ffa_value cactus_notification_get_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ uint32_t vcpu_id, uint32_t flags, bool check_npi_handled)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_GET_CMD,
+ receiver, vcpu_id, check_npi_handled, flags);
+}
+
+static inline uint32_t cactus_notification_get_vcpu(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg5;
+}
+
+static inline uint32_t cactus_notification_get_flags(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg7;
+}
+
+static inline struct ffa_value cactus_notifications_get_success_resp(
+ ffa_id_t source, ffa_id_t dest, uint64_t from_sp,
+ uint64_t from_vm)
+{
+ return cactus_send_response(source, dest, CACTUS_SUCCESS, from_sp,
+ from_vm, 0, 0);
+}
+
+static inline uint64_t cactus_notifications_get_from_sp(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg4;
+}
+
+static inline uint64_t cactus_notifications_get_from_vm(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg5;
+}
+
+static inline bool cactus_notifications_check_npi_handled(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
+}
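+
+/*
+ * Illustrative SP-side reply for a successful notification retrieval,
+ * echoing the bitmaps returned by the SPMC (variable names are made up):
+ *
+ *	return cactus_notifications_get_success_resp(own_id, source,
+ *						     sp_bitmap, vm_bitmap);
+ */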
+
+/**
+ * Request SP to set notifications. The arguments to use in ffa_notification_set
+ * are propagated on the command to test erroneous uses of the interface.
+ * In case of error while calling the interface, the response should include
+ * the error code. If a delayed SRI is requested in the flags, cactus should
+ * send a CACTUS_ECHO_CMD to the SP specified as `echo_dest`. This should help
+ * validate that the SRI is only sent when execution returns to the NWd.
+ */
+#define CACTUS_NOTIFICATIONS_SET_CMD U(0x6e6f74736574)
+
+static inline struct ffa_value cactus_notifications_set_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, uint32_t flags, ffa_notification_bitmap_t notifications,
+ ffa_id_t echo_dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATIONS_SET_CMD,
+ (uint32_t)receiver | ((uint32_t)sender << 16),
+ echo_dest,
+ notifications, flags);
+}
+
+static inline ffa_id_t cactus_notifications_set_get_receiver(
+ struct ffa_value ret)
+{
+ return (ffa_id_t)(ret.arg4 & 0xFFFFU);
+}
+
+static inline ffa_id_t cactus_notifications_set_get_sender(struct ffa_value ret)
+{
+ return (ffa_id_t)((ret.arg4 >> 16U) & 0xFFFFU);
+}
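+
+/*
+ * Note that the receiver and sender IDs share arg4: the receiver occupies
+ * bits [15:0] and the sender bits [31:16]. For example, receiver 0x8001
+ * and sender 0x8002 travel packed as 0x80028001, and are recovered by the
+ * two getters above.
+ */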
+
+/**
+ * Request to start trusted watchdog timer.
+ *
+ * The command id is the hex representation of the string "WDOG"
+ */
+#define CACTUS_TWDOG_START_CMD U(0x57444f47)
+
+static inline struct ffa_value cactus_send_twdog_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t time)
+{
+ return cactus_send_cmd(source, dest, CACTUS_TWDOG_START_CMD, time, 0, 0,
+ 0);
+}
+
+static inline uint32_t cactus_get_wdog_duration(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+/**
+ * Request SP to return the current count of handled requests.
+ *
+ * The command id is the hex representation of the string "getnot".
+ */
+#define CACTUS_GET_REQ_COUNT_CMD U(0x726571636f756e74)
+
+static inline struct ffa_value cactus_get_req_count_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_GET_REQ_COUNT_CMD, 0, 0, 0,
+ 0);
+}
+
+static inline uint32_t cactus_get_req_count(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+/**
+ * Request SP to return the last serviced secure virtual interrupt.
+ *
+ * The command id is the hex representation of the string "vINT"
+ */
+#define CACTUS_LAST_INTERRUPT_SERVICED_CMD U(0x76494e54)
+
+static inline struct ffa_value cactus_get_last_interrupt_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_LAST_INTERRUPT_SERVICED_CMD,
+ 0, 0, 0, 0);
+}
+
+/**
+ * Request SP to resume the task requested by current endpoint after managed
+ * exit.
+ *
+ * The command id is the hex representation of the string "RAME" which denotes
+ * (R)esume (A)fter (M)anaged (E)xit.
+ */
+#define CACTUS_RESUME_AFTER_MANAGED_EXIT U(0x52414d45)
+
+static inline struct ffa_value cactus_resume_after_managed_exit(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_RESUME_AFTER_MANAGED_EXIT,
+ 0, 0, 0, 0);
+}
+#endif
diff --git a/spm/scmi/include/ext/common/aarch64/asm_macros.S b/spm/scmi/include/ext/common/aarch64/asm_macros.S
new file mode 100644
index 0000000..8a69c38
--- /dev/null
+++ b/spm/scmi/include/ext/common/aarch64/asm_macros.S
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+#include <asm_macros_common.S>
+
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+
+ .macro func_prologue
+ stp x29, x30, [sp, #-0x10]!
+ mov x29,sp
+ .endm
+
+ .macro func_epilogue
+ ldp x29, x30, [sp], #0x10
+ .endm
+
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 2KB boundary, as required by the ARMv8 architecture.
+	 * Use zero bytes as the fill value for the padding bytes so that
+	 * the padding decodes as illegal AArch64 instructions. This
+	 * increases security, robustness and potentially facilitates
+	 * debugging.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 11, 0
+ \label:
+ .endm
+
+ /*
+ * Create an entry in the exception vector table, enforcing it is
+ * aligned on a 128-byte boundary, as required by the ARMv8
+	 * architecture. Use zero bytes as the fill value for the padding
+	 * bytes so that the padding decodes as illegal AArch64 instructions.
+	 * This increases security, robustness and potentially facilitates
+	 * debugging.
+ */
+ .macro vector_entry label
+ .section .vectors, "ax"
+ .cfi_sections .debug_frame
+ .align 7, 0
+ .type \label, %function
+ .cfi_startproc
+ \label:
+ .endm
+
+ /*
+	 * Pad the exception vector entry out to its full size, which is
+	 * always 32 instructions. If the entry contains more than
+	 * 32 instructions, an error is emitted.
+ */
+ .macro end_vector_entry label
+ .cfi_endproc
+ .fill \label + (32 * 4) - .
+ .endm
+
+ /*
+	 * Create a vector entry that just spins, making the exception
+	 * unrecoverable.
+ */
+ .macro vector_entry_spin name
+ vector_entry \name
+ b \name
+ end_vector_entry \name
+ .endm
+
+ /*
+ * This macro calculates the base address of an MP stack using the
+ * platform_get_core_pos() index, the name of the stack storage and
+ * the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_mp_stack _name, _size
+ bl platform_get_core_pos
+ ldr x2, =(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
+ /*
+ * This macro calculates the base address of a UP stack using the
+ * name of the stack storage and the size of the stack
+ * Out: X0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ ldr x0, =(\_name + \_size)
+ .endm
+
+ /*
+ * Helper macro to generate the best mov/movk combinations according
+ * the value to be moved. The 16 bits from '_shift' are tested and
+ * if not zero, they are moved into '_reg' without affecting
+ * other bits.
+ */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+ /*
+	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
+	 * generating the best mov/movk combinations. Many base addresses are
+	 * 64KB aligned; in that case the macro eliminates the update of
+	 * bits 15:0.
+ */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
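+
+	/*
+	 * For example, "mov_imm x0, 0x20000" expands to a single
+	 * "mov x0, #0x20000" since bits 15:0 are zero, whereas
+	 * "mov_imm x0, 0x12345678" expands to a mov/movk pair.
+	 */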
+
+ .macro asm_read_sysreg_el1_or_el2 sysreg
+ mrs x0, CurrentEL
+ cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp x0, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ mrs x0, \sysreg\()_el1
+ b 3f
+2:
+ mrs x0, \sysreg\()_el2
+3:
+ .endm
+
+ .macro asm_write_sysreg_el1_or_el2 sysreg scratch_reg
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq 1f
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq 2f
+ b dead
+1:
+ msr \sysreg\()_el1, x0
+ b 3f
+2:
+ msr \sysreg\()_el2, x0
+3:
+ .endm
+
+ .macro asm_read_sctlr_el1_or_el2
+ asm_read_sysreg_el1_or_el2 sctlr
+ .endm
+
+ .macro asm_write_sctlr_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 sctlr \scratch_reg
+ .endm
+
+ .macro asm_write_vbar_el1_or_el2 scratch_reg
+ asm_write_sysreg_el1_or_el2 vbar \scratch_reg
+ .endm
+
+/*
+ * Depending on the current exception level, jump to 'label_el1' or 'label_el2'.
+ * If the current exception level is neither EL1 nor EL2, jump to 'label_error'
+ * instead.
+ * The caller needs to provide the macro with a scratch 64-bit register;
+ * its previous contents will be lost.
+ */
+ .macro JUMP_EL1_OR_EL2 scratch_reg, label_el1, label_el2, label_error
+ mrs \scratch_reg, CurrentEL
+ cmp \scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq \label_el1
+ cmp \scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq \label_el2
+ b \label_error
+ .endm
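+
+/*
+ * For example (labels are made up):
+ *
+ *	JUMP_EL1_OR_EL2 x9, do_el1_setup, do_el2_setup, unexpected_el
+ */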
+
+ /*
+ * Helper macro to read system register value into x0
+ */
+ .macro read reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x0, \reg
+ ret
+ .endm
+
+ /*
+ * Helper macro to write value from x1 to system register
+ */
+ .macro write reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ msr \reg, x1
+ ret
+ .endm
+
+#endif /* __ASM_MACROS_S__ */
diff --git a/spm/scmi/include/ext/common/aarch64/assert_macros.S b/spm/scmi/include/ext/common/aarch64/assert_macros.S
new file mode 100644
index 0000000..b916331
--- /dev/null
+++ b/spm/scmi/include/ext/common/aarch64/assert_macros.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+ /*
+ * Assembler macro to enable asm_assert. Use this macro wherever
+ * assert is required in assembly. Please note that the macro makes
+ * use of label '300' to provide the logic and the caller
+ * should make sure that this label is not used to branch prior
+ * to calling this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b._cc 300f ;\
+ adr x0, .L_assert_filename ;\
+ mov x1, __LINE__ ;\
+ b asm_assert ;\
+300:
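+
+	/*
+	 * For example, to assert that x0 is non-zero (illustrative):
+	 *
+	 *	cmp	x0, #0
+	 *	ASM_ASSERT(ne)
+	 */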
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/spm/scmi/include/ext/common/asm_macros_common.S b/spm/scmi/include/ext/common/asm_macros_common.S
new file mode 100644
index 0000000..1cf94f4
--- /dev/null
+++ b/spm/scmi/include/ext/common/asm_macros_common.S
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ASM_MACROS_COMMON_S__
+#define __ASM_MACROS_COMMON_S__
+
+#include <lib/utils_def.h>
+
+#if ENABLE_BTI && !ARM_ARCH_AT_LEAST(8, 5)
+#error Branch Target Identification requires ARM_ARCH_MINOR >= 5
+#endif
+
+ /*
+ * This macro is used to create a function label and place the
+ * code into a separate text section based on the function name
+ * to enable elimination of unused code during linking. It also adds
+ * basic debug information to enable call stack printing most of the
+ * time.
+ */
+ .macro func _name
+ /*
+ * Add Call Frame Information entry in the .debug_frame section for
+ * debugger consumption. This enables callstack printing in debuggers.
+ * This does not use any space in the final loaded binary, only in the
+ * ELF file.
+ * Note that a function manipulating the CFA pointer location (i.e. the
+ * x29 frame pointer on AArch64) should declare it using the
+ * appropriate .cfi* directives, or be prepared to have a degraded
+ * debugging experience.
+ */
+ .cfi_sections .debug_frame
+ .section .text.\_name, "ax"
+ .type \_name, %function
+ .func \_name
+ /*
+ * .cfi_startproc and .cfi_endproc are needed to output entries in
+ * .debug_frame
+ */
+ .cfi_startproc
+ \_name:
+ .endm
+
+ /*
+ * This macro is used to mark the end of a function.
+ */
+ .macro endfunc _name
+ .endfunc
+ .cfi_endproc
+ .size \_name, . - \_name
+ .endm
+
+ /*
+ * This macro declares an array of 1 or more stacks, properly
+ * aligned and in the requested section
+ */
+#define STACK_ALIGN 6
+
+ .macro declare_stack _name, _section, _size, _count
+ .if ((\_size & ((1 << STACK_ALIGN) - 1)) <> 0)
+ .error "Stack size not correctly aligned"
+ .endif
+ .section \_section, "aw", %nobits
+ .align STACK_ALIGN
+ \_name:
+ .space ((\_count) * (\_size)), 0
+ .endm
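+
+	/*
+	 * For example (names are made up), to declare one 2KB stack per CPU
+	 * in a "tftf_normal_stacks" section:
+	 *
+	 *	declare_stack platform_normal_stacks, tftf_normal_stacks, \
+	 *		0x800, PLATFORM_CORE_COUNT
+	 */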
+
+#endif /* __ASM_MACROS_COMMON_S__ */
diff --git a/spm/scmi/include/ext/common/debug.h b/spm/scmi/include/ext/common/debug.h
new file mode 100644
index 0000000..6025590
--- /dev/null
+++ b/spm/scmi/include/ext/common/debug.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <stdio.h>
+
+#ifdef IMAGE_CACTUS_MM
+/* Remove dependency on spinlocks for Cactus-MM */
+#define mp_printf printf
+#else
+/*
+ * Print a formatted string on the UART.
+ *
+ * Does the same thing as the standard libc's printf() function but in a MP-safe
+ * manner, i.e. it can be called from several CPUs simultaneously without
+ * getting interleaved messages.
+ */
+__attribute__((format(printf, 1, 2)))
+void mp_printf(const char *fmt, ...);
+#endif /* IMAGE_CACTUS_MM */
+
+#ifdef IMAGE_REALM
+void realm_printf(const char *fmt, ...);
+#define mp_printf realm_printf
+#endif
+
+/*
+ * The log output macros print output to the console. These macros produce
+ * compiled log output only if the LOG_LEVEL defined in the makefile (or the
+ * make command line) is greater than or equal to the level required for that
+ * type of log output.
+ * The format expected is similar to printf(). For example:
+ * INFO("Info %s.\n", "message") -> INFO: Info message.
+ * WARN("Warning %s.\n", "message") -> WARNING: Warning message.
+ */
+#define LOG_LEVEL_NONE 0
+#define LOG_LEVEL_ERROR 10
+#define LOG_LEVEL_NOTICE 20
+#define LOG_LEVEL_WARNING 30
+#define LOG_LEVEL_INFO 40
+#define LOG_LEVEL_VERBOSE 50
+
+#if LOG_LEVEL >= LOG_LEVEL_NOTICE
+# define NOTICE(...) mp_printf("NOTICE: " __VA_ARGS__)
+#else
+# define NOTICE(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_ERROR
+# define ERROR(...) mp_printf("ERROR: " __VA_ARGS__)
+#else
+# define ERROR(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_WARNING
+# define WARN(...) mp_printf("WARNING: " __VA_ARGS__)
+#else
+# define WARN(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+# define INFO(...) mp_printf("INFO: " __VA_ARGS__)
+#else
+# define INFO(...)
+#endif
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+# define VERBOSE(...) mp_printf("VERBOSE: " __VA_ARGS__)
+#else
+# define VERBOSE(...)
+#endif
+
+#if ENABLE_BACKTRACE
+void backtrace(const char *cookie);
+#else
+#define backtrace(x)
+#endif
+
+/*
+ * For the moment this panic function is very basic: report an error and
+ * spin. This can be expanded in the future to provide more information.
+ */
+void __attribute__((__noreturn__)) do_panic(const char *file, int line);
+#define panic() do_panic(__FILE__, __LINE__)
+
+void __attribute__((__noreturn__)) do_bug_unreachable(const char *file, int line);
+#define bug_unreachable() do_bug_unreachable(__FILE__, __LINE__)
+
+#endif /* __DEBUG_H__ */
diff --git a/spm/scmi/include/ext/common/param_header.h b/spm/scmi/include/ext/common/param_header.h
new file mode 100644
index 0000000..7834cbb
--- /dev/null
+++ b/spm/scmi/include/ext/common/param_header.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PARAM_HEADER_H__
+#define __PARAM_HEADER_H__
+
+/* Param header types */
+#define PARAM_EP 0x01
+#define PARAM_IMAGE_BINARY 0x02
+#define PARAM_BL31 0x03
+#define PARAM_BL_LOAD_INFO 0x04
+#define PARAM_BL_PARAMS 0x05
+#define PARAM_PSCI_LIB_ARGS 0x06
+#define PARAM_SP_IMAGE_BOOT_INFO 0x07
+
+/* Param header version */
+#define VERSION_1 0x01
+#define VERSION_2 0x02
+
+#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
+ (_p)->h.type = (uint8_t)(_type); \
+ (_p)->h.version = (uint8_t)(_ver); \
+ (_p)->h.size = (uint16_t)sizeof(*_p); \
+ (_p)->h.attr = (uint32_t)(_attr) ; \
+ } while (0)
+
+/* Following is used for populating structure members statically. */
+#define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr) \
+ ._p.h.type = (uint8_t)(_type), \
+ ._p.h.version = (uint8_t)(_ver), \
+ ._p.h.size = (uint16_t)sizeof(_p_type), \
+ ._p.h.attr = (uint32_t)(_attr)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/***************************************************************************
+ * This structure provides the type, version, size and attributes of the
+ * structure it prefixes.
+ ***************************************************************************/
+typedef struct param_header {
+ uint8_t type; /* type of the structure */
+ uint8_t version; /* version of this structure */
+ uint16_t size; /* size of this structure in bytes */
+ uint32_t attr; /* attributes: unused bits SBZ */
+} param_header_t;
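+
+/*
+ * For example (types and names are made up), a structure embedding a
+ * param_header_t in a member 'h' can be statically initialized as:
+ *
+ *	typedef struct {
+ *		param_header_t h;
+ *		uintptr_t pc;
+ *	} ep_info_t;
+ *
+ *	static struct { ep_info_t ep; } params = {
+ *		SET_STATIC_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_info_t, 0),
+ *	};
+ */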
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PARAM_HEADER_H__ */
diff --git a/spm/scmi/include/ext/common/test_helpers.h b/spm/scmi/include/ext/common/test_helpers.h
new file mode 100644
index 0000000..4972d6a
--- /dev/null
+++ b/spm/scmi/include/ext/common/test_helpers.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_HELPERS_H__
+#define TEST_HELPERS_H__
+
+#include <arch_features.h>
+#include <plat_topology.h>
+#include <psci.h>
+#include <sme.h>
+#include <tftf_lib.h>
+#include <trusted_os.h>
+#include <tsp.h>
+#include <uuid_utils.h>
+#include <uuid.h>
+
+typedef struct {
+ uintptr_t addr;
+ size_t size;
+ unsigned int attr;
+ void *arg;
+} map_args_unmap_t;
+
+typedef test_result_t (*test_function_arg_t)(void *arg);
+
+#ifndef __aarch64__
+#define SKIP_TEST_IF_AARCH32() \
+ do { \
+ tftf_testcase_printf("Test not supported on aarch32\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } while (0)
+#else
+#define SKIP_TEST_IF_AARCH32()
+#endif
+
+#define SKIP_TEST_IF_LESS_THAN_N_CLUSTERS(n) \
+ do { \
+ unsigned int clusters_cnt; \
+ clusters_cnt = tftf_get_total_clusters_count(); \
+ if (clusters_cnt < (n)) { \
+ tftf_testcase_printf( \
+ "Need at least %u clusters, only found %u\n", \
+ (n), clusters_cnt); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_LESS_THAN_N_CPUS(n) \
+ do { \
+ unsigned int cpus_cnt; \
+ cpus_cnt = tftf_get_total_cpus_count(); \
+ if (cpus_cnt < (n)) { \
+ tftf_testcase_printf( \
+ "Need at least %u CPUs, only found %u\n", \
+ (n), cpus_cnt); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
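+
+/*
+ * A typical test body uses these helpers before exercising the feature
+ * under test, e.g. (test name is made up):
+ *
+ *	test_result_t test_feature_on_two_cpus(void)
+ *	{
+ *		SKIP_TEST_IF_LESS_THAN_N_CPUS(2);
+ *
+ *		... exercise the feature ...
+ *
+ *		return TEST_RESULT_SUCCESS;
+ *	}
+ */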
+
+#define SKIP_TEST_IF_TRUSTED_OS_NOT_PRESENT() \
+ do { \
+ uuid_t tos_uuid; \
+ \
+ if (!is_trusted_os_present(&tos_uuid)) { \
+ tftf_testcase_printf("No Trusted OS detected\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_TSP_NOT_PRESENT() \
+ do { \
+ uuid_t tos_uuid; \
+ char tos_uuid_str[UUID_STR_SIZE]; \
+ \
+ if (!is_trusted_os_present(&tos_uuid)) { \
+ tftf_testcase_printf("No Trusted OS detected\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ if (!uuid_equal(&tos_uuid, &tsp_uuid)) { \
+ tftf_testcase_printf( \
+ "Trusted OS is not the TSP, its UUID is: %s\n", \
+ uuid_to_str(&tos_uuid, tos_uuid_str)); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_DIT_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_4_dit_present()) { \
+ tftf_testcase_printf( \
+ "DIT not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_PAUTH_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_3_pauth_present()) { \
+ tftf_testcase_printf( \
+ "Pointer Authentication not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_FGT_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_6_fgt_present()) { \
+ tftf_testcase_printf( \
+ "Fine Grained Traps not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_SVE_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_2_sve_present()) { \
+ tftf_testcase_printf("SVE not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_ECV_NOT_SELF_SYNC() \
+ do { \
+ if (get_armv8_6_ecv_support() != \
+ ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) { \
+ tftf_testcase_printf("ARMv8.6-ECV not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_MM_NOT_PRESENT() \
+ do { \
+ smc_args version_smc = { MM_VERSION_AARCH32 }; \
+ smc_ret_values smc_ret = tftf_smc(&version_smc); \
+ uint32_t version = smc_ret.ret0; \
+ \
+ if (version == (uint32_t) SMC_UNKNOWN) { \
+ tftf_testcase_printf("SPM not detected.\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_MTE_SUPPORT_LESS_THAN(n) \
+ do { \
+ if (get_armv8_5_mte_support() < (n)) { \
+ tftf_testcase_printf( \
+ "Memory Tagging Extension not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_MM_VERSION_LESS_THAN(major, minor) \
+ do { \
+ smc_args version_smc = { MM_VERSION_AARCH32 }; \
+ smc_ret_values smc_ret = tftf_smc(&version_smc); \
+ uint32_t version = smc_ret.ret0; \
+ \
+ if (version == (uint32_t) SMC_UNKNOWN) { \
+ tftf_testcase_printf("SPM not detected.\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ if (version < MM_VERSION_FORM(major, minor)) { \
+ tftf_testcase_printf("MM_VERSION returned %u.%u\n" \
+ "The required version is %u.%u\n", \
+ version >> MM_VERSION_MAJOR_SHIFT, \
+ version & MM_VERSION_MINOR_MASK, \
+ major, minor); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ VERBOSE("MM_VERSION returned %u.%u\n", \
+ version >> MM_VERSION_MAJOR_SHIFT, \
+ version & MM_VERSION_MINOR_MASK); \
+ } while (0)
+
+#define SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(version) \
+ do { \
+ uint32_t debug_ver = arch_get_debug_version(); \
+ \
+ if (debug_ver < version) { \
+ tftf_testcase_printf("Debug version returned %d\n" \
+ "The required version is %d\n", \
+ debug_ver, \
+ version); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_TRBE_NOT_SUPPORTED() \
+ do { \
+ if (!get_armv9_0_trbe_support()) { \
+ tftf_testcase_printf("ARMv9-TRBE not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_TRF_NOT_SUPPORTED() \
+ do { \
+ if (!get_armv8_4_trf_support()) { \
+ tftf_testcase_printf("ARMv8.4-TRF not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_SYS_REG_TRACE_NOT_SUPPORTED() \
+ do { \
+ if (!get_armv8_0_sys_reg_trace_support()) { \
+ tftf_testcase_printf("ARMv8-system register" \
+ "trace not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_AFP_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_afp_present()) { \
+ tftf_testcase_printf("ARMv8.7-afp not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#ifdef __aarch64__
+#define SKIP_TEST_IF_PA_SIZE_LESS_THAN(n) \
+ do { \
+ static const unsigned int pa_range_bits_arr[] = { \
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011,\
+ PARANGE_0100, PARANGE_0101, PARANGE_0110 \
+ }; \
+ if (pa_range_bits_arr[get_pa_range()] < n) { \
+ tftf_testcase_printf("PA size less than %d bit\n", n); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+#else
+#define SKIP_TEST_IF_PA_SIZE_LESS_THAN(n) \
+ do { \
+ return TEST_RESULT_SKIPPED; \
+ } while (false)
+#endif
+
+#define SKIP_TEST_IF_BRBE_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_brbe_support()) { \
+ tftf_testcase_printf("FEAT_BRBE not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_WFXT_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_wfxt_present()) { \
+ tftf_testcase_printf("ARMv8.7-WFxT not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_RNG_TRAP_NOT_SUPPORTED() \
+ do { \
+ if (!is_feat_rng_trap_present()) { \
+ tftf_testcase_printf("ARMv8.5-RNG_TRAP not" \
+ "supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_PMUV3_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_pmuv3_supported()) { \
+ tftf_testcase_printf("FEAT_PMUv3 not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_SME_NOT_SUPPORTED() \
+ do { \
+		if (!is_feat_sme_supported()) {				\
+ tftf_testcase_printf("FEAT_SME not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_SME2_NOT_SUPPORTED() \
+ do { \
+		if (!is_feat_sme2_supported()) {				\
+ tftf_testcase_printf("FEAT_SME2 not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP() \
+ do { \
+ u_register_t retrmm; \
+ \
+ if (!get_armv9_2_feat_rme_support()) { \
+ tftf_testcase_printf("FEAT_RME not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ host_rmi_init_cmp_result(); \
+ retrmm = host_rmi_version(); \
+ \
+ VERBOSE("RMM version is: %lu.%lu\n", \
+ RMI_ABI_VERSION_GET_MAJOR(retrmm), \
+ RMI_ABI_VERSION_GET_MINOR(retrmm)); \
+ \
+ /* \
+ * TODO: Remove this once SMC_RMM_REALM_CREATE is implemented \
+ * in TRP. For the moment skip the test if RMM is TRP, TRP \
+ * version is always 0. \
+ */ \
+ if (retrmm == 0U) { \
+ tftf_testcase_printf("RMM is TRP\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+/* Helper macro to verify if system suspend API is supported */
+#define is_psci_sys_susp_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND) \
+ == PSCI_E_SUCCESS)
+
+/* Helper macro to verify if PSCI_STAT_COUNT API is supported */
+#define is_psci_stat_count_supported() \
+ (tftf_get_psci_feature_info(SMC_PSCI_STAT_COUNT) \
+ == PSCI_E_SUCCESS)
+
+/*
+ * Helper function to verify the system state is ready for system
+ * suspend. i.e., a single CPU is running and all other CPUs are powered off.
+ * Returns 1 if the system is ready to suspend, 0 otherwise.
+ */
+int is_sys_suspend_state_ready(void);
+
+/*
+ * Helper function to reset the system. This function shouldn't return.
+ * It is deliberately not marked with __dead so that a test can catch an
+ * erroneous return in TF.
+ */
+void psci_system_reset(void);
+
+/*
+ * Helper function that enables/disables the mem_protect mechanism
+ */
+int psci_mem_protect(int val);
+
+/*
+ * Helper function to call PSCI MEM_PROTECT_CHECK
+ */
+int psci_mem_protect_check(uintptr_t addr, size_t size);
+
+/*
+ * Helper function to get a sentinel address that can be used to test mem_protect
+ */
+unsigned char *psci_mem_prot_get_sentinel(void);
+
+/*
+ * Helper function to memory map and unmap a region needed by a test.
+ *
+ * Return TEST_RESULT_FAIL if the memory could not be successfully mapped or
+ * unmapped. Otherwise, return the test function's result.
+ */
+test_result_t map_test_unmap(const map_args_unmap_t *args,
+ test_function_arg_t test);
+
+/*
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
+ */
+void wait_for_non_lead_cpus(void);
+
+/*
+ * Utility function to wait for a given CPU other than the caller to be
+ * OFF.
+ */
+void wait_for_core_to_turn_off(unsigned int mpidr);
+
+/* Generate 64-bit random number */
+unsigned long long rand64(void);
+
+#endif /* __TEST_HELPERS_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/arm_gic.h b/spm/scmi/include/ext/drivers/arm/arm_gic.h
new file mode 100644
index 0000000..0f27dc1
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/arm_gic.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __ARM_GIC_H__
+#define __ARM_GIC_H__
+
+#include <stdint.h>
+
+/***************************************************************************
+ * Defines and prototypes for ARM GIC driver.
+ **************************************************************************/
+#define MAX_SGIS 16
+#define MIN_SGI_ID 0
+#define MAX_SGI_ID 15
+#define MIN_PPI_ID 16
+#define MAX_PPI_ID 31
+#define MIN_SPI_ID 32
+#define MAX_SPI_ID 1019
+
+#define IS_SGI(irq_num) \
+ (((irq_num) >= MIN_SGI_ID) && ((irq_num) <= MAX_SGI_ID))
+
+#define IS_PPI(irq_num) \
+ (((irq_num) >= MIN_PPI_ID) && ((irq_num) <= MAX_PPI_ID))
+
+#define IS_SPI(irq_num) \
+ (((irq_num) >= MIN_SPI_ID) && ((irq_num) <= MAX_SPI_ID))
+
+#define IS_VALID_INTR_ID(irq_num) \
+ (((irq_num) >= MIN_SGI_ID) && ((irq_num) <= MAX_SPI_ID))
+
+#define GIC_HIGHEST_NS_PRIORITY 0
+#define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */
+#define GIC_SPURIOUS_INTERRUPT 1023
+
+/******************************************************************************
+ * Setup the global GIC interface. In case of GICv2, it would be the GIC
+ * Distributor and in case of GICv3 it would be GIC Distributor and
+ * Re-distributor.
+ *****************************************************************************/
+void arm_gic_setup_global(void);
+
+/******************************************************************************
+ * Setup the GIC interface local to the CPU
+ *****************************************************************************/
+void arm_gic_setup_local(void);
+
+/******************************************************************************
+ * Disable interrupts for this local CPU
+ *****************************************************************************/
+void arm_gic_disable_interrupts_local(void);
+
+/******************************************************************************
+ * Enable interrupts for this local CPU
+ *****************************************************************************/
+void arm_gic_enable_interrupts_local(void);
+
+/******************************************************************************
+ * Send SGI with ID `sgi_id` to a core with index `core_pos`.
+ *****************************************************************************/
+void arm_gic_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/******************************************************************************
+ * Set the interrupt target of interrupt ID `num` to a core with index
+ * `core_pos`
+ *****************************************************************************/
+void arm_gic_set_intr_target(unsigned int num, unsigned int core_pos);
+
+/******************************************************************************
+ * Get the priority of the interrupt ID `num`.
+ *****************************************************************************/
+unsigned int arm_gic_get_intr_priority(unsigned int num);
+
+/******************************************************************************
+ * Set the priority of the interrupt ID `num` to `priority`.
+ *****************************************************************************/
+void arm_gic_set_intr_priority(unsigned int num, unsigned int priority);
+
+/******************************************************************************
+ * Check if the interrupt ID `num` is enabled
+ *****************************************************************************/
+unsigned int arm_gic_intr_enabled(unsigned int num);
+
+/******************************************************************************
+ * Enable the interrupt ID `num`
+ *****************************************************************************/
+void arm_gic_intr_enable(unsigned int num);
+
+/******************************************************************************
+ * Disable the interrupt ID `num`
+ *****************************************************************************/
+void arm_gic_intr_disable(unsigned int num);
+
+/******************************************************************************
+ * Acknowledge the highest pending interrupt. Return the interrupt ID of the
+ * acknowledged interrupt. The raw interrupt acknowledge register value will
+ * be populated in `raw_iar`.
+ *****************************************************************************/
+unsigned int arm_gic_intr_ack(unsigned int *raw_iar);
+
+/******************************************************************************
+ * Signal the end of interrupt processing of an interrupt. The raw interrupt
+ * acknowledge register value returned by arm_gic_intr_ack() should be passed
+ * as argument to this function.
+ *****************************************************************************/
+void arm_gic_end_of_intr(unsigned int raw_iar);
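+
+/******************************************************************************
+ * A typical handler acknowledges, services and then completes an interrupt,
+ * e.g. (illustrative sketch):
+ *
+ *	unsigned int raw_iar;
+ *	unsigned int id = arm_gic_intr_ack(&raw_iar);
+ *
+ *	if (id != GIC_SPURIOUS_INTERRUPT) {
+ *		... service interrupt `id` ...
+ *		arm_gic_end_of_intr(raw_iar);
+ *	}
+ *****************************************************************************/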
+
+/******************************************************************************
+ * Check if the interrupt with ID `num` is pending at the GIC. Returns 1 if
+ * interrupt is pending else returns 0.
+ *****************************************************************************/
+unsigned int arm_gic_is_intr_pending(unsigned int num);
+
+/******************************************************************************
+ * Clear the pending status of the interrupt with ID `num` at the GIC.
+ *****************************************************************************/
+void arm_gic_intr_clear(unsigned int num);
+
+/******************************************************************************
+ * Initialize the GIC Driver. This function will detect the GIC Architecture
+ * present on the system and initialize the appropriate driver. The
+ * `gicr_base` argument will be ignored on GICv2 systems.
+ *****************************************************************************/
+void arm_gic_init(uintptr_t gicc_base, uintptr_t gicd_base, uintptr_t gicr_base);
+
+/******************************************************************************
+ * Save the GIC context local to this CPU (like GIC CPU Interface) which will
+ * be lost when this CPU is powered down.
+ *****************************************************************************/
+void arm_gic_save_context_local(void);
+
+/******************************************************************************
+ * Restore the GIC context local to this CPU (like GIC CPU Interface) which
+ * was lost when this CPU was powered down.
+ *****************************************************************************/
+void arm_gic_restore_context_local(void);
+
+/******************************************************************************
+ * Save the global GIC context when GIC will be powered down (like GIC
+ * Distributor and Re-distributor) as a result of system suspend.
+ *****************************************************************************/
+void arm_gic_save_context_global(void);
+
+/******************************************************************************
+ * Restore the global GIC context which was lost as a result of GIC power
+ * down (like GIC Distributor and Re-distributor) during system suspend.
+ *****************************************************************************/
+void arm_gic_restore_context_global(void);
+
+#endif /* __ARM_GIC_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/gic_common.h b/spm/scmi/include/ext/drivers/arm/gic_common.h
new file mode 100644
index 0000000..71387f6
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/gic_common.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_COMMON_H__
+#define __GIC_COMMON_H__
+
+/***************************************************************************
+ * Defines and prototypes common to GIC v2 and v3 drivers.
+ **************************************************************************/
+/* Distributor interface register offsets */
+#define GICD_CTLR 0x0
+#define GICD_TYPER 0x4
+#define GICD_ISENABLER 0x100
+#define GICD_ICENABLER 0x180
+#define GICD_ISPENDR 0x200
+#define GICD_ICPENDR 0x280
+#define GICD_ISACTIVER 0x300
+#define GICD_ICACTIVER 0x380
+#define GICD_IPRIORITYR 0x400
+#define GICD_ICFGR 0xC00
+
+/* Distributor interface register shifts */
+#define ISENABLER_SHIFT 5
+#define ICENABLER_SHIFT ISENABLER_SHIFT
+#define ISPENDR_SHIFT 5
+#define ICPENDR_SHIFT ISPENDR_SHIFT
+#define ISACTIVER_SHIFT 5
+#define ICACTIVER_SHIFT ISACTIVER_SHIFT
+#define IPRIORITYR_SHIFT 2
+#define ICFGR_SHIFT 4
+
+/* GICD_TYPER bit definitions */
+#define IT_LINES_NO_MASK 0x1f
+
+/* GICD Priority register mask */
+#define GIC_PRI_MASK 0xff
+
+/*
+ * Number of per-cpu interrupts to save prior to system suspend.
+ * This comprises all SGIs and PPIs.
+ */
+#define NUM_PCPU_INTR 32
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+/* Helper to detect the GIC mode (GICv2 or GICv3) configured in the system */
+unsigned int is_gicv3_mode(void);
+
+/*******************************************************************************
+ * Private GIC Distributor function prototypes for use by GIC drivers
+ ******************************************************************************/
+unsigned int gicd_read_isenabler(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_icenabler(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_ispendr(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_icpendr(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_isactiver(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_icactiver(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_get_ipriorityr(uintptr_t base, unsigned int interrupt_id);
+unsigned int gicd_read_icfgr(uintptr_t base, unsigned int interrupt_id);
+void gicd_write_isenabler(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icenabler(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_ispendr(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icpendr(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_isactiver(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icactiver(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_ipriorityr(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+void gicd_write_icfgr(uintptr_t base, unsigned int interrupt_id,
+ unsigned int val);
+unsigned int gicd_get_isenabler(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_isenabler(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_icenabler(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_ispendr(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_icpendr(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_isactiver(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_icactiver(uintptr_t base, unsigned int interrupt_id);
+void gicd_set_ipriorityr(uintptr_t base, unsigned int interrupt_id,
+ unsigned int priority);
+
+/*******************************************************************************
+ * Private GIC Distributor interface accessors for reading and writing
+ * entire registers
+ ******************************************************************************/
+static inline unsigned int gicd_read_ctlr(uintptr_t base)
+{
+ return mmio_read_32(base + GICD_CTLR);
+}
+
+static inline unsigned int gicd_read_typer(uintptr_t base)
+{
+ return mmio_read_32(base + GICD_TYPER);
+}
+
+static inline void gicd_write_ctlr(uintptr_t base, unsigned int val)
+{
+ mmio_write_32(base + GICD_CTLR, val);
+}
+
+
+#endif /*__ASSEMBLY__*/
+#endif /* __GIC_COMMON_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/gic_v2.h b/spm/scmi/include/ext/drivers/arm/gic_v2.h
new file mode 100644
index 0000000..6e7b764
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/gic_v2.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_V2_H__
+#define __GIC_V2_H__
+
+/***************************************************************************
+ * Defines and prototypes specific to GIC v2.
+ **************************************************************************/
+
+/* GICD_CTLR bit definitions */
+#define GICD_CTLR_ENABLE (1 << 0)
+
+/* Distributor interface register offsets */
+#define GICD_ITARGETSR 0x800
+#define GICD_SGIR 0xF00
+#define GICD_CPENDSGIR 0xF10
+#define GICD_SPENDSGIR 0xF20
+
+/* GIC Distributor register shifts */
+#define ITARGETSR_SHIFT 2
+#define CPENDSGIR_SHIFT 2
+#define SPENDSGIR_SHIFT CPENDSGIR_SHIFT
+
+/* GICD_SGIR bit shifts */
+#define GICD_SGIR_INTID_SHIFT 0
+#define GICD_SGIR_CPUTL_SHIFT 16
+
+/* Physical CPU Interface register offsets */
+#define GICC_CTLR 0x0
+#define GICC_PMR 0x4
+#define GICC_BPR 0x8
+#define GICC_IAR 0xC
+#define GICC_EOIR 0x10
+#define GICC_RPR 0x14
+#define GICC_HPPIR 0x18
+#define GICC_AHPPIR 0x28
+#define GICC_IIDR 0xFC
+#define GICC_DIR 0x1000
+#define GICC_PRIODROP GICC_EOIR
+
+/* GICC_IIDR bit masks and shifts */
+#define GICC_IIDR_PID_SHIFT 20
+#define GICC_IIDR_ARCH_SHIFT 16
+#define GICC_IIDR_REV_SHIFT 12
+#define GICC_IIDR_IMP_SHIFT 0
+
+#define GICC_IIDR_PID_MASK 0xfff
+#define GICC_IIDR_ARCH_MASK 0xf
+#define GICC_IIDR_REV_MASK 0xf
+#define GICC_IIDR_IMP_MASK 0xfff
+
+/* HYP view virtual CPU Interface register offsets */
+#define GICH_CTL 0x0
+#define GICH_VTR 0x4
+#define GICH_ELRSR0 0x30
+#define GICH_ELRSR1 0x34
+#define GICH_APR0 0xF0
+#define GICH_LR_BASE 0x100
+
+/* Virtual CPU Interface register offsets */
+#define GICV_CTL 0x0
+#define GICV_PRIMASK 0x4
+#define GICV_BP 0x8
+#define GICV_INTACK 0xC
+#define GICV_EOI 0x10
+#define GICV_RUNNINGPRI 0x14
+#define GICV_HIGHESTPEND 0x18
+#define GICV_DEACTIVATE 0x1000
+
+/* GICC_IAR bit masks and shifts */
+#define GICC_IAR_INTID_SHIFT 0
+#define GICC_IAR_CPUID_SHIFT 10
+
+#define GICC_IAR_INTID_MASK 0x3ff
+#define GICC_IAR_CPUID_MASK 0x7
+
+#define get_gicc_iar_intid(val) (((val) >> GICC_IAR_INTID_SHIFT) \
+ & GICC_IAR_INTID_MASK)
+#define get_gicc_iar_cpuid(val) (((val) >> GICC_IAR_CPUID_SHIFT) \
+ & GICC_IAR_CPUID_MASK)
+
+/*
+ * GICC_CTLR is banked to provide Secure and Non-secure copies and the register
+ * bit assignments are different in the Secure and Non-secure copies.
+ * These are the bit assignments for the Non-secure copy.
+ */
+#define GICC_CTLR_ENABLE (1 << 0)
+#define FIQ_BYP_DIS_GRP1 (1 << 5)
+#define IRQ_BYP_DIS_GRP1 (1 << 6)
+#define EOI_MODE_NS (1 << 9)
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+/*******************************************************************************
+ * Private Interfaces for internal use by the GICv2 driver
+ ******************************************************************************/
+
+/*******************************************************************************
+ * GICv2 Distributor interface accessors for reading/writing entire registers
+ ******************************************************************************/
+static inline unsigned int gicd_read_sgir(unsigned int base)
+{
+ return mmio_read_32(base + GICD_SGIR);
+}
+
+static inline void gicd_write_sgir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICD_SGIR, val);
+}
+
+/*******************************************************************************
+ * GICv2 CPU interface accessors for reading entire registers
+ ******************************************************************************/
+
+static inline unsigned int gicc_read_ctlr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_CTLR);
+}
+
+static inline unsigned int gicc_read_pmr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_PMR);
+}
+
+static inline unsigned int gicc_read_bpr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_BPR);
+}
+
+static inline unsigned int gicc_read_iar(unsigned int base)
+{
+ return mmio_read_32(base + GICC_IAR);
+}
+
+static inline unsigned int gicc_read_eoir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_EOIR);
+}
+
+static inline unsigned int gicc_read_hppir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_HPPIR);
+}
+
+static inline unsigned int gicc_read_ahppir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_AHPPIR);
+}
+
+static inline unsigned int gicc_read_dir(unsigned int base)
+{
+ return mmio_read_32(base + GICC_DIR);
+}
+
+static inline unsigned int gicc_read_iidr(unsigned int base)
+{
+ return mmio_read_32(base + GICC_IIDR);
+}
+
+/*******************************************************************************
+ * GICv2 CPU interface accessors for writing entire registers
+ ******************************************************************************/
+
+static inline void gicc_write_ctlr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_CTLR, val);
+}
+
+static inline void gicc_write_pmr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_PMR, val);
+}
+
+static inline void gicc_write_bpr(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_BPR, val);
+}
+
+static inline void gicc_write_iar(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_IAR, val);
+}
+
+static inline void gicc_write_eoir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_EOIR, val);
+}
+
+static inline void gicc_write_hppir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_HPPIR, val);
+}
+
+static inline void gicc_write_dir(unsigned int base, unsigned int val)
+{
+ mmio_write_32(base + GICC_DIR, val);
+}
+
+/******************************************************************************
+ * GICv2 public driver API
+ *****************************************************************************/
+
+/*
+ * Initialize the GICv2 driver. The base addresses of GIC CPU interface
+ * `gicc_base` and the Distributor interface `gicd_base` must be provided
+ * as arguments.
+ */
+void gicv2_init(uintptr_t gicc_base, uintptr_t gicd_base);
+
+/*
+ * Write the GICv2 EOIR register with `val` passed as argument. `val`
+ * should be the raw value read from IAR register.
+ */
+void gicv2_gicc_write_eoir(unsigned int val);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the GICD ISPENDR register.
+ */
+void gicv2_gicd_set_ispendr(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the GICD ICPENDR register.
+ */
+void gicv2_gicd_set_icpendr(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` from the GICD ISPENDR register.
+ */
+unsigned int gicv2_gicd_get_ispendr(unsigned int interrupt_id);
+
+/*
+ * Read and return the value in GICC IAR register
+ */
+unsigned int gicv2_gicc_read_iar(void);
+
+/*
+ * Read and return the target core mask of interrupt ID `num`.
+ */
+uint8_t gicv2_read_itargetsr_value(unsigned int num);
+
+/*
+ * Set the bit corresponding to `num` in the GICD ICENABLER register.
+ */
+void gicv2_gicd_set_icenabler(unsigned int num);
+
+/*
+ * Get the bit corresponding to `num` in the GICD ISENABLER register.
+ */
+unsigned int gicv2_gicd_get_isenabler(unsigned int num);
+
+/*
+ * Set the bit corresponding to `num` in the GICD ISENABLER register.
+ */
+void gicv2_gicd_set_isenabler(unsigned int num);
+
+/*
+ * Set the target of interrupt ID `num` to core with index `core_pos`.
+ */
+void gicv2_set_itargetsr(unsigned int num, unsigned int core_pos);
+
+/*
+ * Set the target of interrupt ID `num` to the desired core mask.
+ */
+void gicv2_set_itargetsr_value(unsigned int num, unsigned int val);
+
+/*
+ * Send SGI with ID `sgi_id` to core with index `core_pos`.
+ */
+void gicv2_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/*
+ * Get the priority of the interrupt `interrupt_id`.
+ */
+unsigned int gicv2_gicd_get_ipriorityr(unsigned int interrupt_id);
+
+/*
+ * Set the priority of the interrupt `interrupt_id` to `priority`.
+ */
+void gicv2_gicd_set_ipriorityr(unsigned int interrupt_id, unsigned int priority);
+
+/*
+ * Setup the GIC Distributor interface.
+ */
+void gicv2_setup_distif(void);
+
+/*
+ * Save the GICv2 SGI and PPI context prior to powering down the
+ * GIC Distributor.
+ */
+void gicv2_save_sgi_ppi_context(void);
+
+/*
+ * Restore the GICv2 SGI and PPI context after powering up the
+ * GIC Distributor.
+ */
+void gicv2_restore_sgi_ppi_context(void);
+
+/*
+ * Disable the GIC CPU interface.
+ */
+void gicv2_disable_cpuif(void);
+
+/*
+ * Setup the GIC CPU interface.
+ */
+void gicv2_setup_cpuif(void);
+
+/*
+ * Enable the GIC CPU interface.
+ */
+void gicv2_enable_cpuif(void);
+
+/*
+ * Save the GICv2 CPU interface prior to powering down the CPU interface.
+ */
+void gicv2_save_cpuif_context(void);
+
+/*
+ * Restore the GICv2 CPU interface after powering up the CPU interface.
+ */
+void gicv2_restore_cpuif_context(void);
+
+/*
+ * Read the GICD ITARGETSR0 to figure out the GIC ID for the current core.
+ * This function is required to be invoked on successful boot of a core.
+ * The GIC ID will be stored internally by the driver to convert core index
+ * to GIC CPU ID when required.
+ */
+void gicv2_probe_gic_cpu_id(void);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __GIC_V2_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/gic_v3.h b/spm/scmi/include/ext/drivers/arm/gic_v3.h
new file mode 100644
index 0000000..e164103
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/gic_v3.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __GIC_V3_H__
+#define __GIC_V3_H__
+
+/***************************************************************************
+ * Defines and prototypes specific to GIC v3.
+ *************************************************************************/
+
+/* GICD register offsets */
+#define GICD_IROUTER 0x6000
+
+/* GICD_CTLR bit definitions */
+#define GICD_CTLR_ENABLE_GRP1A (1 << 1)
+#define GICD_CTLR_ARE_NS_SHIFT 4
+#define GICD_CTLR_ARE_NS_MASK 0x1
+
+/* GICR_TYPER bit definitions */
+#define TYPER_AFF_VAL_SHIFT 32
+#define TYPER_PROC_NUM_SHIFT 8
+#define TYPER_LAST_SHIFT 4
+
+#define TYPER_AFF_VAL_MASK 0xffffffff
+#define TYPER_PROC_NUM_MASK 0xffff
+#define TYPER_LAST_MASK 0x1
+
+#define TYPER_LAST_BIT (1 << TYPER_LAST_SHIFT)
+
+/* GICD_IROUTER shifts and masks */
+#define IROUTER_IRM_SHIFT 31
+#define IROUTER_IRM_MASK 0x1
+
+/*******************************************************************************
+ * GICv3 Re-distributor interface registers & constants
+ ******************************************************************************/
+#define GICR_PCPUBASE_SHIFT 0x11
+#define GICR_SGIBASE_OFFSET (1 << 0x10) /* 64 KB */
+#define GICR_CTLR 0x0
+#define GICR_TYPER 0x08
+#define GICR_WAKER 0x14
+#define GICR_IGROUPR0 (GICR_SGIBASE_OFFSET + 0x80)
+#define GICR_ISENABLER0 (GICR_SGIBASE_OFFSET + 0x100)
+#define GICR_ICENABLER0 (GICR_SGIBASE_OFFSET + 0x180)
+#define GICR_ISPENDR0 (GICR_SGIBASE_OFFSET + 0x200)
+#define GICR_ICPENDR0 (GICR_SGIBASE_OFFSET + 0x280)
+#define GICR_IPRIORITYR (GICR_SGIBASE_OFFSET + 0x400)
+#define GICR_ICFGR0 (GICR_SGIBASE_OFFSET + 0xc00)
+#define GICR_ICFGR1 (GICR_SGIBASE_OFFSET + 0xc04)
+#define GICR_IGRPMODR0 (GICR_SGIBASE_OFFSET + 0xd00)
+
+/*******************************************************************************
+ * GICv3 CPU interface registers & constants
+ ******************************************************************************/
+/* ICC_SRE bit definitions*/
+#define ICC_SRE_EN_BIT (1 << 3)
+#define ICC_SRE_DIB_BIT (1 << 2)
+#define ICC_SRE_DFB_BIT (1 << 1)
+#define ICC_SRE_SRE_BIT (1 << 0)
+
+/* ICC_IAR1_EL1 bit definitions */
+#define IAR1_EL1_INTID_SHIFT 0
+#define IAR1_EL1_INTID_MASK 0xffffff
+
+/* ICC_SGI1R bit definitions */
+#define SGI1R_TARGET_LIST_MASK 0xffff
+#define SGI1R_TARGET_LIST_SHIFT 0x0
+#define SGI1R_AFF_MASK 0xff
+#define SGI1R_AFF1_SHIFT 16ULL
+#define SGI1R_AFF2_SHIFT 32ULL
+#ifdef __aarch64__
+#define SGI1R_AFF3_SHIFT 48ULL
+#endif
+#define SGI1R_INTID_MASK 0xf
+#define SGI1R_INTID_SHIFT 24
+#define SGI1R_IRM_MASK 0x1
+#define SGI1R_IRM_SHIFT 0x40
+
+/* ICC_IGRPEN1_EL1 bit definitions */
+#define IGRPEN1_EL1_ENABLE_SHIFT 0
+#define IGRPEN1_EL1_ENABLE_BIT (1 << IGRPEN1_EL1_ENABLE_SHIFT)
+
+/* ICH_ICH_LR<n>_EL2 definitions */
+#define ICH_LRn_EL2_STATE_Invalid (0UL << 62)
+#define ICH_LRn_EL2_STATE_Pending (1UL << 62)
+#define ICH_LRn_EL2_STATE_Active (2UL << 62)
+#define ICH_LRn_EL2_STATE_Pending_Active (3UL << 62)
+#define ICH_LRn_EL2_Group_0 (0UL << 60)
+#define ICH_LRn_EL2_Group_1 (1UL << 60)
+#define ICH_LRn_EL2_Priority_SHIFT 48
+#define ICH_LRn_EL2_Priority_MASK 0xFF
+#define ICH_LRn_EL2_vINTID_SHIFT 0
+#define ICH_LRn_EL2_vINTID_MASK 0xFFFF
+
+/* ICV_CTLR_EL1 definitions */
+#define ICV_CTLR_EL1_PRIbits_SHIFT 8
+#define ICV_CTLR_EL1_PRIbits_MASK 7
+
+/* ICV_IGRPEN1_EL1 definition */
+#define ICV_IGRPEN1_EL1_Enable 1UL
+
+/* The highest affinity 0 that can be an SGI target */
+#define SGI_TARGET_MAX_AFF0 16
+
+#ifndef __ASSEMBLY__
+
+/*******************************************************************************
+ * Helper GICv3 macros
+ ******************************************************************************/
+#define gicv3_acknowledge_interrupt()	(read_icc_iar1_el1() & \
+					 IAR1_EL1_INTID_MASK)
+#define gicv3_end_of_interrupt(id) write_icc_eoir1_el1(id)
+
+#define is_sre_enabled() \
+ (IS_IN_EL2() ? (read_icc_sre_el2() & ICC_SRE_SRE_BIT) :\
+ (read_icc_sre_el1() & ICC_SRE_SRE_BIT))
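+
+/*
+ * A minimal handling sequence using the helpers above (illustrative):
+ *
+ *	unsigned int id = gicv3_acknowledge_interrupt();
+ *
+ *	if (id != GIC_SPURIOUS_INTERRUPT) {
+ *		... service interrupt `id` ...
+ *		gicv3_end_of_interrupt(id);
+ *	}
+ */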
+
+/******************************************************************************
+ * GICv3 public driver API
+ *****************************************************************************/
+/*
+ * Initialize the GICv3 driver. The base addresses of GIC Re-distributor
+ * interface `gicr_base` and the Distributor interface `gicd_base` must
+ * be provided as arguments.
+ */
+void gicv3_init(uintptr_t gicr_base, uintptr_t gicd_base);
+
+/*
+ * Setup the GIC Distributor interface.
+ */
+void gicv3_setup_distif(void);
+
+/*
+ * Probe the Re-distributor base corresponding to this core.
+ * This function is required to be invoked on successful boot of a core.
+ * The base address will be stored internally by the driver and will be
+ * used when accessing the Re-distributor interface.
+ */
+void gicv3_probe_redistif_addr(void);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ICPENDR register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_icpendr(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` in the ISPENDR register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+unsigned int gicv3_get_ispendr(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ICENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_icenabler(unsigned int interrupt_id);
+
+/*
+ * Get the bit corresponding to `interrupt_id` in the ISENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+unsigned int gicv3_get_isenabler(unsigned int interrupt_id);
+
+/*
+ * Set the bit corresponding to `interrupt_id` in the ISENABLER register
+ * at either Distributor or Re-distributor depending on the interrupt.
+ */
+void gicv3_set_isenabler(unsigned int interrupt_id);
+
+/*
+ * Route the interrupt `interrupt_id` to the core with index `core_pos`
+ * via the IROUTER register at the Distributor.
+ */
+void gicv3_set_intr_route(unsigned int interrupt_id, unsigned int core_pos);
+
+/*
+ * Send SGI with ID `sgi_id` to core with index `core_pos`.
+ */
+void gicv3_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+/*
+ * Get the priority of the interrupt `interrupt_id`.
+ */
+unsigned int gicv3_get_ipriorityr(unsigned int interrupt_id);
+
+/*
+ * Set the priority of the interrupt `interrupt_id` to `priority`.
+ */
+void gicv3_set_ipriorityr(unsigned int interrupt_id, unsigned int priority);
+
+/*
+ * Restore the GICv3 SGI and PPI context after powering up the
+ * GIC Re-distributor.
+ */
+void gicv3_restore_sgi_ppi_context(void);
+
+/*
+ * Save the GICv3 SGI and PPI context prior to powering down the
+ * GIC Re-distributor.
+ */
+void gicv3_save_sgi_ppi_context(void);
+
+/*
+ * Restore the GICv3 CPU interface after powering up the CPU interface.
+ */
+void gicv3_restore_cpuif_context(void);
+
+/*
+ * Save the GICv3 CPU interface prior to powering down the CPU interface.
+ */
+void gicv3_save_cpuif_context(void);
+
+/*
+ * Disable the GIC CPU interface.
+ */
+void gicv3_disable_cpuif(void);
+
+/*
+ * Setup the GIC CPU interface.
+ */
+void gicv3_setup_cpuif(void);
+
+/*
+ * Enable the GIC CPU interface.
+ */
+void gicv3_enable_cpuif(void);
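+
+/*
+ * Example usage (illustrative sketch; GICD_BASE and GICR_BASE stand for
+ * platform-specific base addresses): bring up the GIC on the primary core
+ * and exercise an SGI round trip.
+ *
+ *   gicv3_init(GICR_BASE, GICD_BASE);
+ *   gicv3_setup_distif();
+ *   gicv3_probe_redistif_addr();
+ *   gicv3_setup_cpuif();
+ *   gicv3_enable_cpuif();
+ *
+ *   gicv3_send_sgi(sgi_id, core_pos);
+ *
+ *   // In the interrupt handler:
+ *   unsigned int intid = gicv3_acknowledge_interrupt();
+ *   gicv3_end_of_interrupt(intid);
+ */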
+
+#endif /* __ASSEMBLY__ */
+#endif /* __GIC_V3_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/pl011.h b/spm/scmi/include/ext/drivers/arm/pl011.h
new file mode 100644
index 0000000..3e19ee8
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/pl011.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PL011_H__
+#define __PL011_H__
+
+/* PL011 Registers */
+#define UARTDR 0x000
+#define UARTRSR 0x004
+#define UARTECR 0x004
+#define UARTFR 0x018
+#define UARTILPR 0x020
+#define UARTIBRD 0x024
+#define UARTFBRD 0x028
+#define UARTLCR_H 0x02C
+#define UARTCR 0x030
+#define UARTIFLS 0x034
+#define UARTIMSC 0x038
+#define UARTRIS 0x03C
+#define UARTMIS 0x040
+#define UARTICR 0x044
+#define UARTDMACR 0x048
+
+/* Data status bits */
+#define UART_DATA_ERROR_MASK 0x0F00
+
+/* Status reg bits */
+#define UART_STATUS_ERROR_MASK 0x0F
+
+/* Flag reg bits */
+#define PL011_UARTFR_RI (1 << 8) /* Ring indicator */
+#define PL011_UARTFR_TXFE (1 << 7) /* Transmit FIFO empty */
+#define PL011_UARTFR_RXFF (1 << 6) /* Receive FIFO full */
+#define PL011_UARTFR_TXFF (1 << 5) /* Transmit FIFO full */
+#define PL011_UARTFR_RXFE (1 << 4) /* Receive FIFO empty */
+#define PL011_UARTFR_BUSY (1 << 3) /* UART busy */
+#define PL011_UARTFR_DCD (1 << 2) /* Data carrier detect */
+#define PL011_UARTFR_DSR (1 << 1) /* Data set ready */
+#define PL011_UARTFR_CTS (1 << 0) /* Clear to send */
+
+#define PL011_UARTFR_TXFF_BIT 5 /* Transmit FIFO full bit in UARTFR register */
+#define PL011_UARTFR_RXFE_BIT 4 /* Receive FIFO empty bit in UARTFR register */
+#define PL011_UARTFR_BUSY_BIT 3 /* UART busy bit in UARTFR register */
+
+/* Control reg bits */
+#define PL011_UARTCR_CTSEN (1 << 15) /* CTS hardware flow control enable */
+#define PL011_UARTCR_RTSEN (1 << 14) /* RTS hardware flow control enable */
+#define PL011_UARTCR_RTS (1 << 11) /* Request to send */
+#define PL011_UARTCR_DTR (1 << 10) /* Data transmit ready. */
+#define PL011_UARTCR_RXE (1 << 9) /* Receive enable */
+#define PL011_UARTCR_TXE (1 << 8) /* Transmit enable */
+#define PL011_UARTCR_LBE (1 << 7) /* Loopback enable */
+#define PL011_UARTCR_UARTEN (1 << 0) /* UART Enable */
+
+#if !defined(PL011_LINE_CONTROL)
+/* FIFO Enabled / No Parity / 8 Data bits / One Stop Bit */
+#define PL011_LINE_CONTROL (PL011_UARTLCR_H_FEN | PL011_UARTLCR_H_WLEN_8)
+#endif
+
+/* Line Control Register Bits */
+#define PL011_UARTLCR_H_SPS (1 << 7) /* Stick parity select */
+#define PL011_UARTLCR_H_WLEN_8 (3 << 5)
+#define PL011_UARTLCR_H_WLEN_7 (2 << 5)
+#define PL011_UARTLCR_H_WLEN_6 (1 << 5)
+#define PL011_UARTLCR_H_WLEN_5 (0 << 5)
+#define PL011_UARTLCR_H_FEN (1 << 4) /* FIFOs Enable */
+#define PL011_UARTLCR_H_STP2 (1 << 3) /* Two stop bits select */
+#define PL011_UARTLCR_H_EPS (1 << 2) /* Even parity select */
+#define PL011_UARTLCR_H_PEN (1 << 1) /* Parity Enable */
+#define PL011_UARTLCR_H_BRK (1 << 0) /* Send break */
+
+/* Constants */
+#define PL011_BAUDRATE 115200
+
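+/*
+ * The baud-rate divisor is split across UARTIBRD and UARTFBRD:
+ * divisor = uart_clk / (16 * baud_rate). The integer part goes to UARTIBRD
+ * and the fractional part, scaled to 6 bits and rounded, to UARTFBRD.
+ * A minimal sketch, assuming a hypothetical 24 MHz reference clock:
+ *
+ *   unsigned int ibrd = uart_clk / (16 * PL011_BAUDRATE);        // 13
+ *   unsigned int rem = uart_clk % (16 * PL011_BAUDRATE);
+ *   unsigned int fbrd = ((rem * 64) + (8 * PL011_BAUDRATE)) /
+ *                       (16 * PL011_BAUDRATE);                   // 1
+ */
+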
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+/* Functions */
+
+int console_pl011_putc(int);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PL011_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/private_timer.h b/spm/scmi/include/ext/drivers/arm/private_timer.h
new file mode 100644
index 0000000..aff0b8e
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/private_timer.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PRIVATE_TIMER_H__
+#define __PRIVATE_TIMER_H__
+
+void private_timer_start(unsigned long timeo);
+void private_timer_stop(void);
+void private_timer_save(void);
+void private_timer_restore(void);
+
+#endif /* __PRIVATE_TIMER_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/sp804.h b/spm/scmi/include/ext/drivers/arm/sp804.h
new file mode 100644
index 0000000..004fb76
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/sp804.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP804_H__
+#define __SP804_H__
+
+#include <stdint.h>
+
+#define SP804_LOAD_OFFSET 0x0
+#define SP804_CURRENT_VALUE_OFFSET 0x4
+#define SP804_CTRL_OFFSET 0x8
+#define SP804_INT_CLR_OFFSET 0xC
+#define SP804_INT_STATUS_OFFSET 0x10
+#define SP804_MASKED_INT_STATUS_OFFSET 0x14
+#define SP804_BG_LOAD_OFFSET 0x18
+
+/* SP804 Timer control register bit-fields */
+#define ONESHOT_MODE (0x1 << 0) /* Bit [0] */
+#define TIMER_SIZE (0x1 << 1) /* Bit [1] */
+#define TIMER_PRE_DIV1 (0x00 << 2) /* Bits [2:3] */
+#define INT_ENABLE (0x01 << 5) /* Bit [5] */
+#define TIMER_MODE_FREE_RUN (0x0 << 6) /* Bit [6] */
+#define TIMER_EN (0x01 << 7) /* Bit [7] */
+
+/*
+ * Program the sp804 timer to fire an interrupt after `time_out_ms`
+ * milliseconds.
+ *
+ * Always returns 0.
+ */
+int sp804_timer_program(unsigned long time_out_ms);
+
+/*
+ * Cancel the currently programmed sp804 timer interrupt.
+ *
+ * Always returns 0.
+ */
+int sp804_timer_cancel(void);
+
+/*
+ * Initialize the sp804 timer so that it can be used to program timer
+ * interrupts.
+ * Must be called by the primary CPU only.
+ *
+ * Always returns 0.
+ */
+int sp804_timer_init(uintptr_t base_addr, unsigned int timer_freq);
+
+/*
+ * Handler to acknowledge and de-activate the sp804 timer interrupt.
+ *
+ * Always returns 0.
+ */
+int sp804_timer_handler(void);
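+
+/*
+ * Example usage (illustrative sketch; SP804_BASE and the 32768 Hz clock
+ * are hypothetical platform values):
+ *
+ *   sp804_timer_init(SP804_BASE, 32768);  // once, on the primary CPU
+ *   sp804_timer_program(10);              // interrupt after ~10 ms
+ *   // ... in the timer interrupt handler:
+ *   sp804_timer_handler();
+ *   // ... or, to abort a pending request:
+ *   sp804_timer_cancel();
+ */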
+
+#endif /* __SP804_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/sp805.h b/spm/scmi/include/ext/drivers/arm/sp805.h
new file mode 100644
index 0000000..75bcc12
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/sp805.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP805_H__
+#define __SP805_H__
+
+/* SP805 register offset */
+#define SP805_WDOG_LOAD_OFF 0x000
+#define SP805_WDOG_VALUE_OFF 0x004
+#define SP805_WDOG_CTRL_OFF 0x008
+#define SP805_WDOG_INT_CLR_OFF 0x00c
+#define SP805_WDOG_RIS_OFF 0x010
+#define SP805_WDOG_MIS_OFF 0x014
+#define SP805_WDOG_LOCK_OFF 0xc00
+#define SP805_WDOG_ITCR_OFF 0xf00
+#define SP805_WDOG_ITOP_OFF 0xf04
+#define SP805_WDOG_PERIPH_ID_OFF 0xfe0
+#define SP805_WDOG_PCELL_ID_OFF 0xff0
+
+/*
+ * Magic word to unlock access to all other watchdog registers. Writing any
+ * other value locks them.
+ */
+#define SP805_WDOG_UNLOCK_ACCESS 0x1ACCE551
+
+/* Register field definitions */
+#define SP805_WDOG_CTRL_MASK 0x03
+#define SP805_WDOG_CTRL_RESEN (1 << 1)
+#define SP805_WDOG_CTRL_INTEN (1 << 0)
+#define SP805_WDOG_RIS_WDOGRIS (1 << 0)
+#define SP805_WDOG_RIS_MASK 0x1
+#define SP805_WDOG_MIS_WDOGMIS (1 << 0)
+#define SP805_WDOG_MIS_MASK 0x1
+#define SP805_WDOG_ITCR_MASK 0x1
+#define SP805_WDOG_ITOP_MASK 0x3
+#define SP805_WDOG_PART_NUM_SHIFT 0
+#define SP805_WDOG_PART_NUM_MASK 0xfff
+#define SP805_WDOG_DESIGNER_ID_SHIFT 12
+#define SP805_WDOG_DESIGNER_ID_MASK 0xff
+#define SP805_WDOG_REV_SHIFT 20
+#define SP805_WDOG_REV_MASK 0xf
+#define SP805_WDOG_CFG_SHIFT 24
+#define SP805_WDOG_CFG_MASK 0xff
+#define SP805_WDOG_PCELL_ID_SHIFT 0
+#define SP805_WDOG_PCELL_ID_MASK 0xff
+
+#define ARM_SP805_TWDG_CLK_HZ 32768
+
+/* Public APIs for non-trusted watchdog module. */
+void sp805_wdog_start(unsigned int wdog_cycles);
+void sp805_wdog_stop(void);
+void sp805_wdog_refresh(void);
+
+/* Public APIs for trusted watchdog module. */
+void sp805_twdog_start(unsigned int wdog_cycles);
+void sp805_twdog_stop(void);
+void sp805_twdog_refresh(void);
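+
+/*
+ * Example (illustrative): the trusted watchdog counts down at
+ * ARM_SP805_TWDG_CLK_HZ, so the cycle count for a timeout expressed in
+ * milliseconds can be derived as follows:
+ *
+ *   unsigned int cycles = (ARM_SP805_TWDG_CLK_HZ * timeout_ms) / 1000;
+ *   sp805_twdog_start(cycles);
+ *   // ... refresh periodically, before the timeout expires:
+ *   sp805_twdog_refresh();
+ *   // ... and stop when done:
+ *   sp805_twdog_stop();
+ */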
+
+#endif /* __SP805_H__ */
diff --git a/spm/scmi/include/ext/drivers/arm/system_timer.h b/spm/scmi/include/ext/drivers/arm/system_timer.h
new file mode 100644
index 0000000..3bb5dfa
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/arm/system_timer.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SYSTEM_TIMER_H__
+#define __SYSTEM_TIMER_H__
+
+#include <stdint.h>
+
+/*
+ * Program the systimer to fire an interrupt after `time_out_ms` milliseconds.
+ *
+ * Always returns 0.
+ */
+int program_systimer(unsigned long time_out_ms);
+
+/*
+ * Cancel the currently programmed systimer interrupt.
+ *
+ * Always returns 0.
+ */
+int cancel_systimer(void);
+
+/*
+ * Initialise the systimer so that it can be used to program timer
+ * interrupts.
+ * Must be called by the primary CPU only.
+ *
+ * Always returns 0.
+ */
+int init_systimer(uintptr_t systimer_base);
+
+/*
+ * Handler to acknowledge and de-activate the systimer interrupt.
+ *
+ * Always returns 0.
+ */
+int handler_systimer(void);
+
+#endif /* __SYSTEM_TIMER_H__ */
diff --git a/spm/scmi/include/ext/drivers/console.h b/spm/scmi/include/ext/drivers/console.h
new file mode 100644
index 0000000..4c22a99
--- /dev/null
+++ b/spm/scmi/include/ext/drivers/console.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CONSOLE_H__
+#define __CONSOLE_H__
+
+/* Returned by getc callbacks when the receive FIFO is empty. */
+#define ERROR_NO_PENDING_CHAR -1
+/* Returned by console_xxx() if the registered console doesn't implement xxx. */
+#define ERROR_NO_VALID_CONSOLE (-128)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/*
+ * Initialize the console so that debug information can be printed without
+ * relying on a C runtime. The console base address is saved to the data
+ * section. Returns 1 on success, 0 on error.
+ */
+int console_init(uintptr_t base_addr,
+ unsigned int uart_clk, unsigned int baud_rate);
+
+/*
+ * Function to output a character over the console. It returns the character
+ * printed on success or an error code.
+ */
+int console_putc(int c);
+
+/*
+ * Get a character from the console. This function blocks until a character
+ * becomes available, then returns it; on failure it returns an error code.
+ */
+int console_getc(void);
+
+/*
+ * Get a character from the console without blocking. Returns the character
+ * read on success, or an error code (e.g. ERROR_NO_PENDING_CHAR) if no
+ * character is pending.
+ */
+int console_try_getc(void);
+
+/*
+ * Force a write of all buffered data that hasn't been output yet. Returns 0
+ * on successful completion, otherwise an error code.
+ */
+int console_flush(void);
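+
+/*
+ * Example usage (illustrative sketch; PLAT_UART_BASE and PLAT_UART_CLK are
+ * hypothetical platform constants): initialize the console, then poll for
+ * input and echo it back.
+ *
+ *   console_init(PLAT_UART_BASE, PLAT_UART_CLK, 115200);
+ *   for (;;) {
+ *       int c = console_try_getc();
+ *       if (c == ERROR_NO_PENDING_CHAR)
+ *           continue;
+ *       console_putc(c);
+ *       console_flush();
+ *   }
+ */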
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONSOLE_H__ */
diff --git a/spm/scmi/include/ext/extensions/amu.h b/spm/scmi/include/ext/extensions/amu.h
new file mode 100644
index 0000000..d5950ca
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/amu.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AMU_H
+#define AMU_H
+
+#include <stdint.h>
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils_def.h>
+
+#define AMU_GROUP0_COUNTERS_MASK U(0xf)
+#define AMU_GROUP0_NR_COUNTERS U(4)
+
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK U(0)
+#endif
+
+/* Calculate number of group 1 counters */
+#if (AMU_GROUP1_COUNTERS_MASK & (1 << 15))
+#define AMU_GROUP1_NR_COUNTERS 16U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 14))
+#define AMU_GROUP1_NR_COUNTERS 15U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 13))
+#define AMU_GROUP1_NR_COUNTERS 14U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 12))
+#define AMU_GROUP1_NR_COUNTERS 13U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 11))
+#define AMU_GROUP1_NR_COUNTERS 12U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 10))
+#define AMU_GROUP1_NR_COUNTERS 11U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 9))
+#define AMU_GROUP1_NR_COUNTERS 10U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 8))
+#define AMU_GROUP1_NR_COUNTERS 9U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 7))
+#define AMU_GROUP1_NR_COUNTERS 8U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 6))
+#define AMU_GROUP1_NR_COUNTERS 7U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 5))
+#define AMU_GROUP1_NR_COUNTERS 6U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 4))
+#define AMU_GROUP1_NR_COUNTERS 5U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 3))
+#define AMU_GROUP1_NR_COUNTERS 4U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 2))
+#define AMU_GROUP1_NR_COUNTERS 3U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 1))
+#define AMU_GROUP1_NR_COUNTERS 2U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 0))
+#define AMU_GROUP1_NR_COUNTERS 1U
+#else
+#define AMU_GROUP1_NR_COUNTERS 0U
+#endif
+
+CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
+
+unsigned int amu_get_version(void);
+
+uint64_t amu_group0_cnt_read(unsigned int idx);
+#if __aarch64__
+uint64_t amu_group0_voffset_read(unsigned int idx);
+void amu_group0_voffset_write(unsigned int idx, uint64_t val);
+#endif
+
+#if AMU_GROUP1_NR_COUNTERS
+uint64_t amu_group1_cnt_read(unsigned int idx);
+#if __aarch64__
+uint64_t amu_group1_voffset_read(unsigned int idx);
+void amu_group1_voffset_write(unsigned int idx, uint64_t val);
+#endif
+#endif
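+
+/*
+ * Example (illustrative, assuming amu_get_version() reflects the
+ * ID_AA64PFR0_EL1.AMU field): only read a group 0 counter when the AMU is
+ * actually implemented.
+ *
+ *   if (amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+ *       uint64_t cnt = amu_group0_cnt_read(0U);
+ *       ...
+ *   }
+ */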
+
+#endif /* AMU_H */
diff --git a/spm/scmi/include/ext/extensions/amu_private.h b/spm/scmi/include/ext/extensions/amu_private.h
new file mode 100644
index 0000000..7ae17d9
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/amu_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AMU_PRIVATE_H
+#define AMU_PRIVATE_H
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(unsigned int idx);
+uint64_t amu_group1_cnt_read_internal(unsigned int idx);
+
+#if __aarch64__
+uint64_t amu_group0_voffset_read_internal(unsigned int idx);
+void amu_group0_voffset_write_internal(unsigned int idx, uint64_t val);
+
+uint64_t amu_group1_voffset_read_internal(unsigned int idx);
+void amu_group1_voffset_write_internal(unsigned int idx, uint64_t val);
+#endif
+
+#endif /* AMU_PRIVATE_H */
diff --git a/spm/scmi/include/ext/extensions/fpu.h b/spm/scmi/include/ext/extensions/fpu.h
new file mode 100644
index 0000000..d7b4f99
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/fpu.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FPU_H
+#define FPU_H
+
+/* The FPU and SIMD register bank consists of 32 quadword (128-bit) Q registers. */
+#define FPU_Q_SIZE 16U
+#define FPU_Q_COUNT 32U
+
+/* These defines are needed by assembly code to access FPU registers. */
+#define FPU_OFFSET_Q 0U
+#define FPU_OFFSET_FPSR (FPU_Q_SIZE * FPU_Q_COUNT)
+#define FPU_OFFSET_FPCR (FPU_OFFSET_FPSR + 8)
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stdint.h>
+
+typedef struct fpu_reg_state {
+ uint8_t q[FPU_Q_COUNT][FPU_Q_SIZE];
+ unsigned long fpsr;
+ unsigned long fpcr;
+} fpu_reg_state_t __aligned(16);
+
+/*
+ * Read the FPU state registers and compare them with the template values
+ * provided in `fpu`.
+ */
+bool fpu_state_compare_template(fpu_reg_state_t *fpu);
+
+/*
+ * Fill the template with random values and copy it to the FPU state
+ * registers (SIMD vectors, FPCR, FPSR).
+ */
+void fpu_state_fill_regs_and_template(fpu_reg_state_t *fpu);
+
+/*
+ * Populate the provided FPU structure with the template value `regs_val`
+ * for all 32 FPU/SIMD registers and for the status registers FPCR/FPSR.
+ */
+void fpu_state_set(fpu_reg_state_t *vec,
+ uint8_t regs_val);
+
+/*
+ * Print the content of the provided FPU structure.
+ */
+void fpu_state_print(fpu_reg_state_t *vec);
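+
+/*
+ * Example usage (illustrative sketch): fill the FPU registers with a random
+ * template, run the scenario under test (e.g. a world switch), then check
+ * that the state was preserved.
+ *
+ *   static fpu_reg_state_t fpu_state;
+ *
+ *   fpu_state_fill_regs_and_template(&fpu_state);
+ *   // ... trigger the context switch being tested ...
+ *   if (!fpu_state_compare_template(&fpu_state))
+ *       fpu_state_print(&fpu_state);
+ */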
+
+#endif /* __ASSEMBLER__ */
+#endif /* FPU_H */
diff --git a/spm/scmi/include/ext/extensions/pauth.h b/spm/scmi/include/ext/extensions/pauth.h
new file mode 100644
index 0000000..c8d577f
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/pauth.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PAUTH_H
+#define PAUTH_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __aarch64__
+/* Initialize 128-bit ARMv8.3-PAuth key */
+uint128_t init_apkey(void);
+
+/* Program APIAKey_EL1 key and enable ARMv8.3-PAuth */
+void pauth_init_enable(void);
+
+/* Disable ARMv8.3-PAuth */
+void pauth_disable(void);
+
+/*
+ * Fill the PAuth keys and the template with random values if the keys were
+ * not initialized earlier; otherwise copy the PAuth key registers to the
+ * template.
+ */
+void pauth_test_lib_fill_regs_and_template(void);
+
+/* Read and Compare PAuth registers with provided template values. */
+bool pauth_test_lib_compare_template(void);
+
+/* Read and Store PAuth registers in template. */
+void pauth_test_lib_read_keys(void);
+
+/* Test PAuth instructions. */
+void pauth_test_lib_test_intrs(void);
+
+#endif /* __aarch64__ */
+
+#endif /* PAUTH_H */
diff --git a/spm/scmi/include/ext/extensions/sme.h b/spm/scmi/include/ext/extensions/sme.h
new file mode 100644
index 0000000..c89e630
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/sme.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SME_H
+#define SME_H
+
+#include <stdint.h>
+#include <utils_def.h>
+
+#define MAX_VL (512)
+#define MAX_VL_B (MAX_VL / 8)
+#define SME_SMCR_LEN_MAX U(0x1FF)
+
+typedef enum {
+ SMSTART, /* enters streaming SVE mode and enables SME ZA array */
+ SMSTART_SM, /* enters streaming SVE mode only */
+ SMSTART_ZA, /* enables SME ZA array storage only */
+} smestart_instruction_type_t;
+
+typedef enum {
+ SMSTOP, /* exits streaming SVE mode and disables SME ZA array */
+ SMSTOP_SM, /* exits streaming SVE mode only */
+ SMSTOP_ZA, /* disables SME ZA array storage only */
+} smestop_instruction_type_t;
+
+/* SME feature related prototypes. */
+void sme_enable(void);
+void sme_smstart(smestart_instruction_type_t smstart_type);
+void sme_smstop(smestop_instruction_type_t smstop_type);
+
+/* SME2 feature related prototypes. */
+void sme2_enable(void);
+
+/* Assembly function prototypes. */
+uint64_t sme_rdvl_1(void);
+void sme_try_illegal_instruction(void);
+void sme_vector_to_ZA(const uint64_t *input_vector);
+void sme_ZA_to_vector(const uint64_t *output_vector);
+void sme2_load_zt0_instruction(const uint64_t *inputbuf);
+void sme2_store_zt0_instruction(const uint64_t *outputbuf);
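+
+/*
+ * Example usage (illustrative sketch): enter streaming SVE mode, read the
+ * streaming vector length in bytes, then leave streaming mode again.
+ *
+ *   sme_enable();
+ *   sme_smstart(SMSTART_SM);
+ *   uint64_t svl = sme_rdvl_1();
+ *   sme_smstop(SMSTOP_SM);
+ */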
+
+#endif /* SME_H */
diff --git a/spm/scmi/include/ext/extensions/sve.h b/spm/scmi/include/ext/extensions/sve.h
new file mode 100644
index 0000000..60432a5
--- /dev/null
+++ b/spm/scmi/include/ext/extensions/sve.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SVE_H
+#define SVE_H
+
+#include <arch.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h> /* for rand() */
+
+#define fill_sve_helper(num) "ldr z"#num", [%0, #"#num", MUL VL];"
+#define read_sve_helper(num) "str z"#num", [%0, #"#num", MUL VL];"
+
+/*
+ * Max. vector length permitted by the architecture:
+ * SVE: 2048 bits = 256 bytes
+ */
+#define SVE_VECTOR_LEN_BYTES 256
+#define SVE_NUM_VECTORS 32
+
+#define SVE_VQ_ARCH_MIN (0U)
+#define SVE_VQ_ARCH_MAX ((1 << ZCR_EL2_SVE_VL_WIDTH) - 1)
+
+/* convert SVE VL in bytes to VQ */
+#define SVE_VL_TO_VQ(vl_bytes) (((vl_bytes) >> 4U) - 1)
+
+/* convert SVE VQ to bits */
+#define SVE_VQ_TO_BITS(vq) (((vq) + 1U) << 7U)
+
+/* convert SVE VQ to bytes */
+#define SVE_VQ_TO_BYTES(vq) (SVE_VQ_TO_BITS(vq) / 8)
+
+/* Get a random SVE VQ between 0 and SVE_VQ_ARCH_MAX */
+#define SVE_GET_RANDOM_VQ (rand() % (SVE_VQ_ARCH_MAX + 1))
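+
+/*
+ * Worked example of the conversions above: for an implemented VL of
+ * 256 bytes (the architectural maximum), SVE_VL_TO_VQ(256) = 15,
+ * SVE_VQ_TO_BITS(15) = 2048 and SVE_VQ_TO_BYTES(15) = 256, i.e. VQ counts
+ * 128-bit quadwords, minus one.
+ */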
+
+#ifndef __ASSEMBLY__
+
+typedef uint8_t sve_vector_t[SVE_VECTOR_LEN_BYTES];
+
+void sve_config_vq(uint8_t sve_vq);
+uint32_t sve_probe_vl(uint8_t sve_max_vq);
+void sve_fill_vector_regs(const sve_vector_t v[SVE_NUM_VECTORS]);
+void sve_read_vector_regs(sve_vector_t v[SVE_NUM_VECTORS]);
+
+/* Assembly routines */
+bool sve_subtract_arrays_interleaved(int *dst_array, int *src_array1,
+ int *src_array2, int array_size,
+ bool (*world_switch_cb)(void));
+
+void sve_subtract_arrays(int *dst_array, int *src_array1, int *src_array2,
+ int array_size);
+
+#ifdef __aarch64__
+
+/* Returns the implemented SVE VL in bytes (constrained by ZCR_EL3.LEN) */
+static inline uint64_t sve_vector_length_get(void)
+{
+ uint64_t vl;
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ "rdvl %0, #1;"
+ ".arch_extension nosve\n"
+ : "=r" (vl)
+ );
+
+ return vl;
+}
+
+#endif /* __aarch64__ */
+#endif /* __ASSEMBLY__ */
+#endif /* SVE_H */
diff --git a/spm/scmi/include/ext/lib/aarch64/arch.h b/spm/scmi/include/ext/lib/aarch64/arch.h
new file mode 100644
index 0000000..739cd26
--- /dev/null
+++ b/spm/scmi/include/ext/lib/aarch64/arch.h
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(0x18)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_VAR_MASK U(0xf0)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_REV_MASK U(0xf)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (ULL(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK ULL(0xff)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF3_SHIFT U(32)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFFLVL0 ULL(0x0)
+#define MPIDR_AFFLVL1 ULL(0x1)
+#define MPIDR_AFFLVL2 ULL(0x2)
+#define MPIDR_AFFLVL3 ULL(0x3)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to add one when
+ * using this macro to define array sizes.
+ * TODO: Only the first 3 affinity levels are supported for now.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+#define MPID_MASK (MPIDR_MT_MASK | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
+
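+/*
+ * Example (illustrative): derive a linear core position from this core's
+ * MPIDR using the accessors above, assuming a hypothetical topology of
+ * 4 CPUs per cluster with affinity level 1 as the cluster ID
+ * (read_mpidr_el1() is provided by arch_helpers.h):
+ *
+ *   uint64_t mpid = read_mpidr_el1() & MPID_MASK;
+ *   unsigned int core_pos = (MPIDR_AFF_ID(mpid, 1) * 4U) +
+ *                           MPIDR_AFF_ID(mpid, 0);
+ */
+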
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_SGI1R S3_0_C12_C11_5
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+
+#define ICV_CTLR_EL1 S3_0_C12_C12_4
+#define ICV_IAR1_EL1 S3_0_C12_C12_0
+#define ICV_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICV_EOIR1_EL1 S3_0_C12_C12_1
+#define ICV_PMR_EL1 S3_0_C4_C6_0
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW U(0x0)
+#define DCCISW U(0x1)
+#define DCCSW U(0x2)
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_AA64PFR0_AMU_V1 U(0x1)
+#define ID_AA64PFR0_AMU_V1P1 U(0x2)
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_WIDTH U(4)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_WIDTH U(4)
+#define ID_AA64PFR0_CSV2_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_CSV2_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
+#define ID_AA64PFR0_RAS_MASK ULL(0xf)
+#define ID_AA64PFR0_RAS_SHIFT U(28)
+#define ID_AA64PFR0_RAS_WIDTH U(4)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_RAS_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_RASV1P1_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+#define ID_AA64PFR0_GIC_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_GICV3_GICV4_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_GICV4_1_SUPPORTED ULL(0x2)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_SPE_NOT_SUPPORTED U(0)
+#define ID_AA64DFR0_SPE U(1)
+#define ID_AA64DFR0_SPE_V1P1 U(2)
+#define ID_AA64DFR0_SPE_V1P2 U(3)
+#define ID_AA64DFR0_SPE_V1P3 U(4)
+#define ID_AA64DFR0_SPE_V1P4 U(5)
+
+/* ID_AA64DFR0_EL1.DEBUG definitions */
+#define ID_AA64DFR0_DEBUG_SHIFT U(0)
+#define ID_AA64DFR0_DEBUG_LENGTH U(4)
+#define ID_AA64DFR0_DEBUG_MASK ULL(0xf)
+#define ID_AA64DFR0_DEBUG_BITS (ID_AA64DFR0_DEBUG_MASK << \
+ ID_AA64DFR0_DEBUG_SHIFT)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_SUPPORTED U(6)
+#define ID_AA64DFR0_V8_DEBUG_ARCH_VHE_SUPPORTED U(7)
+#define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED U(8)
+#define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED U(9)
+
+/* ID_AA64DFR0_EL1.HPMN0 definitions */
+#define ID_AA64DFR0_HPMN0_SHIFT U(60)
+#define ID_AA64DFR0_HPMN0_MASK ULL(0xf)
+#define ID_AA64DFR0_HPMN0_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.BRBE definitions */
+#define ID_AA64DFR0_BRBE_SHIFT U(52)
+#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
+#define ID_AA64DFR0_BRBE_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceBuffer definitions */
+#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
+#define ID_AA64DFR0_TRACEBUFFER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEBUFFER_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceFilt definitions */
+#define ID_AA64DFR0_TRACEFILT_SHIFT U(40)
+#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
+#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+
+/* ID_AA64DFR0_EL1.PMUVer definitions */
+#define ID_AA64DFR0_PMUVER_SHIFT U(8)
+#define ID_AA64DFR0_PMUVER_MASK ULL(0xf)
+#define ID_AA64DFR0_PMUVER_NOT_SUPPORTED ULL(0)
+
+/* ID_AA64DFR0_EL1.TraceVer definitions */
+#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
+#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEVER_SUPPORTED ULL(1)
+
+#define EL_IMPL_NONE ULL(0)
+#define EL_IMPL_A64ONLY ULL(1)
+#define EL_IMPL_A64_A32 ULL(2)
+
+/* ID_AA64ISAR0_EL1 definitions */
+#define ID_AA64ISAR0_EL1 S3_0_C0_C6_0
+#define ID_AA64ISAR0_TLB_MASK ULL(0xf)
+#define ID_AA64ISAR0_TLB_SHIFT U(56)
+#define ID_AA64ISAR0_TLB_WIDTH U(4)
+#define ID_AA64ISAR0_TLBIRANGE_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR0_TLB_NOT_SUPPORTED ULL(0)
+
+/* ID_AA64ISAR1_EL1 definitions */
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_WIDTH U(4)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_WIDTH U(4)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_WIDTH U(4)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_WIDTH U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_SHIFT U(40)
+#define ID_AA64ISAR1_SPECRES_WIDTH U(4)
+#define ID_AA64ISAR1_SPECRES_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_SPECRES_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB_MASK ULL(0xf)
+#define ID_AA64ISAR1_DPB_SHIFT U(0)
+#define ID_AA64ISAR1_DPB_WIDTH U(4)
+#define ID_AA64ISAR1_DPB_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_DPB_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB2_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_MASK ULL(0xf)
+#define ID_AA64ISAR1_LS64_SHIFT U(60)
+#define ID_AA64ISAR1_LS64_WIDTH U(4)
+#define ID_AA64ISAR1_LS64_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_LS64_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_LS64_V_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_ACCDATA_SUPPORTED ULL(0x3)
+
+/* ID_AA64ISAR2_EL1 definitions */
+#define ID_AA64ISAR2_EL1 S3_0_C0_C6_2
+#define ID_AA64ISAR2_WFXT_MASK ULL(0xf)
+#define ID_AA64ISAR2_WFXT_SHIFT U(0x0)
+#define ID_AA64ISAR2_WFXT_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR2_GPA3_SHIFT U(8)
+#define ID_AA64ISAR2_GPA3_MASK ULL(0xf)
+#define ID_AA64ISAR2_APA3_SHIFT U(12)
+#define ID_AA64ISAR2_APA3_MASK ULL(0xf)
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK ULL(0xf)
+
+#define PARANGE_0000 U(32)
+#define PARANGE_0001 U(36)
+#define PARANGE_0010 U(40)
+#define PARANGE_0011 U(42)
+#define PARANGE_0100 U(44)
+#define PARANGE_0101 U(48)
+#define PARANGE_0110 U(52)
+
+#define ID_AA64MMFR0_EL1_ECV_SHIFT U(60)
+#define ID_AA64MMFR0_EL1_ECV_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_ECV_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_ECV_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_ECV_SELF_SYNCH ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_FGT_SHIFT U(56)
+#define ID_AA64MMFR0_EL1_FGT_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_FGT_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_FGT_SUPPORTED ULL(0x1)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_52B_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT U(40)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_52B_SUPPORTED ULL(0x3)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT U(36)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SUPPORTED ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_52B_SUPPORTED ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT U(32)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_52B_SUPPORTED ULL(0x3)
+
+/* ID_AA64MMFR1_EL1 definitions */
+#define ID_AA64MMFR1_EL1_PAN_SHIFT U(20)
+#define ID_AA64MMFR1_EL1_PAN_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_PAN_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_PAN_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_PAN2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR1_EL1_PAN3_SUPPORTED ULL(0x3)
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_AFP_SHIFT U(44)
+#define ID_AA64MMFR1_EL1_AFP_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_AFP_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_LO_SHIFT U(16)
+#define ID_AA64MMFR1_EL1_LO_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_LO_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_LOR_SUPPORTED ULL(0x1)
+
+/* ID_AA64MMFR2_EL1 definitions */
+#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+
+#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
+#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
+
+#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
+#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
+
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
+
+#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+
+#define ID_AA64PFR1_EL1_BT_SHIFT U(0)
+#define ID_AA64PFR1_EL1_BT_MASK ULL(0xf)
+
+#define BTI_IMPLEMENTED ULL(1) /* The BTI mechanism is implemented */
+
+#define ID_AA64PFR1_EL1_MTE_SHIFT U(8)
+#define ID_AA64PFR1_EL1_MTE_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT U(28)
+#define ID_AA64PFR1_EL1_RNDR_TRAP_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_RNG_TRAP_NOT_SUPPORTED ULL(0x0)
+
+#define ID_AA64PFR1_CSV2_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_CSV2_FRAC_SHIFT U(32)
+#define ID_AA64PFR1_CSV2_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_CSV2_1P1_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_CSV2_1P2_SUPPORTED ULL(0x2)
+
+#define MTE_UNIMPLEMENTED ULL(0)
+#define MTE_IMPLEMENTED_EL0 ULL(1) /* MTE is only implemented at EL0 */
+#define MTE_IMPLEMENTED_ELX ULL(2) /* MTE is implemented at all ELs */
+
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_SME_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR1_EL1_SME_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_SME2_SUPPORTED ULL(0x2)
+
+#define ID_AA64PFR1_RAS_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_RAS_FRAC_SHIFT U(12)
+#define ID_AA64PFR1_RAS_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_RASV1P1_SUPPORTED ULL(0x1)
+
+/* ID_PFR1_EL1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+ & ID_PFR1_VIRTEXT_MASK)
+
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
+#define SCTLR_AARCH32_EL1_RES1 \
+ ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+ (U(1) << 4) | (U(1) << 3))
+
+#define SCTLR_EL3_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT (ULL(1) << 0)
+#define SCTLR_A_BIT (ULL(1) << 1)
+#define SCTLR_C_BIT (ULL(1) << 2)
+#define SCTLR_SA_BIT (ULL(1) << 3)
+#define SCTLR_SA0_BIT (ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT (ULL(1) << 5)
+#define SCTLR_ITD_BIT (ULL(1) << 7)
+#define SCTLR_SED_BIT (ULL(1) << 8)
+#define SCTLR_UMA_BIT (ULL(1) << 9)
+#define SCTLR_I_BIT (ULL(1) << 12)
+#define SCTLR_EnDB_BIT (ULL(1) << 13)
+#define SCTLR_DZE_BIT (ULL(1) << 14)
+#define SCTLR_UCT_BIT (ULL(1) << 15)
+#define SCTLR_NTWI_BIT (ULL(1) << 16)
+#define SCTLR_NTWE_BIT (ULL(1) << 18)
+#define SCTLR_WXN_BIT (ULL(1) << 19)
+#define SCTLR_UWXN_BIT (ULL(1) << 20)
+#define SCTLR_IESB_BIT (ULL(1) << 21)
+#define SCTLR_SPAN_BIT (ULL(1) << 23)
+#define SCTLR_E0E_BIT (ULL(1) << 24)
+#define SCTLR_EE_BIT (ULL(1) << 25)
+#define SCTLR_UCI_BIT (ULL(1) << 26)
+#define SCTLR_EnDA_BIT (ULL(1) << 27)
+#define SCTLR_EnIB_BIT (ULL(1) << 30)
+#define SCTLR_EnIA_BIT (ULL(1) << 31)
+#define SCTLR_DSSBS_BIT (ULL(1) << 44)
+#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+
+/* CPACR_EL1 definitions */
+#define CPACR_EL1_FPEN(x) ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0 U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+
+#define CPACR_EL1_ZEN(x) ((x) << 16)
+#define CPACR_EL1_ZEN_TRAP_EL0 U(0x1)
+#define CPACR_EL1_ZEN_TRAP_ALL U(0x2)
+#define CPACR_EL1_ZEN_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_AMVOFFEN_BIT (UL(1) << 35)
+#define SCR_ATA_BIT (U(1) << 26)
+#define SCR_FIEN_BIT (U(1) << 21)
+#define SCR_API_BIT (U(1) << 17)
+#define SCR_APK_BIT (U(1) << 16)
+#define SCR_TWE_BIT (U(1) << 13)
+#define SCR_TWI_BIT (U(1) << 12)
+#define SCR_ST_BIT (U(1) << 11)
+#define SCR_RW_BIT (U(1) << 10)
+#define SCR_SIF_BIT (U(1) << 9)
+#define SCR_HCE_BIT (U(1) << 8)
+#define SCR_SMD_BIT (U(1) << 7)
+#define SCR_EA_BIT (U(1) << 3)
+#define SCR_FIQ_BIT (U(1) << 2)
+#define SCR_IRQ_BIT (U(1) << 1)
+#define SCR_NS_BIT (U(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_RESET_VAL SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_SPD32(x) ((x) << 14)
+#define MDCR_SPD32_LEGACY ULL(0x0)
+#define MDCR_SPD32_DISABLE ULL(0x2)
+#define MDCR_SPD32_ENABLE ULL(0x3)
+#define MDCR_SDD_BIT (ULL(1) << 16)
+#define MDCR_NSPB(x) ((x) << 12)
+#define MDCR_NSPB_EL1 ULL(0x3)
+#define MDCR_TDOSA_BIT (ULL(1) << 10)
+#define MDCR_TDA_BIT (ULL(1) << 9)
+#define MDCR_TPM_BIT (ULL(1) << 6)
+#define MDCR_SCCD_BIT (ULL(1) << 23)
+#define MDCR_EL3_RESET_VAL ULL(0x0)
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_TPMS (U(1) << 14)
+#define MDCR_EL2_E2PB(x) ((x) << 12)
+#define MDCR_EL2_E2PB_EL1 U(0x3)
+#define MDCR_EL2_TDRA_BIT (U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT (U(1) << 10)
+#define MDCR_EL2_TDA_BIT (U(1) << 9)
+#define MDCR_EL2_TDE_BIT (U(1) << 8)
+#define MDCR_EL2_HPME_BIT (U(1) << 7)
+#define MDCR_EL2_TPM_BIT (U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_HPMN_SHIFT U(0)
+#define MDCR_EL2_HPMN_MASK ULL(0x1f)
+#define MDCR_EL2_RESET_VAL U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL U(0x0)
+#define HSTR_EL2_T_MASK U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT (U(1) << 0)
+#define CNTHP_CTL_RESET_VAL U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL ULL(0x0)
+#define VTTBR_VMID_MASK ULL(0xff)
+#define VTTBR_VMID_SHIFT U(48)
+#define VTTBR_BADDR_MASK ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT U(0)
+
+/* HCR definitions */
+#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
+#define HCR_API_BIT (ULL(1) << 41)
+#define HCR_APK_BIT (ULL(1) << 40)
+#define HCR_E2H_BIT (ULL(1) << 34)
+#define HCR_TGE_BIT (ULL(1) << 27)
+#define HCR_RW_SHIFT U(31)
+#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
+#define HCR_AMO_BIT (ULL(1) << 5)
+#define HCR_IMO_BIT (ULL(1) << 4)
+#define HCR_FMO_BIT (ULL(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT U(8)
+#define ISR_I_SHIFT U(7)
+#define ISR_F_SHIFT U(6)
+
+/* CNTHCTL_EL2 definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define EVNTEN_BIT (U(1) << 2)
+#define EL1PCEN_BIT (U(1) << 1)
+#define EL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT (U(1) << 9)
+#define EL0VTEN_BIT (U(1) << 8)
+#define EL0PCTEN_BIT (U(1) << 0)
+#define EL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* CPTR_EL3 definitions */
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_BIT (U(1) << 30)
+#define TTA_BIT (U(1) << 20)
+#define ESM_BIT (U(1) << 12)
+#define TFP_BIT (U(1) << 10)
+#define CPTR_EZ_BIT (U(1) << 8)
+#define CPTR_EL3_RESET_VAL U(0x0)
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1 ((ULL(3) << 12) | (ULL(1) << 9) | (ULL(0xff)))
+#define CPTR_EL2_TCPAC_BIT (ULL(1) << 31)
+#define CPTR_EL2_TAM_BIT (ULL(1) << 30)
+#define CPTR_EL2_SMEN_MASK ULL(0x3)
+#define CPTR_EL2_SMEN_SHIFT U(24)
+#define CPTR_EL2_TTA_BIT (ULL(1) << 20)
+#define CPTR_EL2_TSM_BIT (ULL(1) << 12)
+#define CPTR_EL2_TFP_BIT (ULL(1) << 10)
+#define CPTR_EL2_TZ_BIT (ULL(1) << 8)
+#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT (U(1) << 0)
+#define DAIF_IRQ_BIT (U(1) << 1)
+#define DAIF_ABT_BIT (U(1) << 2)
+#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_DAIF_SHIFT U(6)
+#define SPSR_DAIF_MASK U(0xf)
+
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0x0)
+#define SPSR_E_BIG U(0x1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0x0)
+#define SPSR_T_THUMB U(0x1)
+
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT (U(1) << 1)
+#define RMR_EL3_AA64_BIT (U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE U(0xFFFF0000)
+
+/*
+ * TCR definitions
+ */
+#define TCR_EL3_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
+#define TCR_EL3_PS_SHIFT U(16)
+
+#define TCR_TxSZ_MIN ULL(16)
+#define TCR_TxSZ_MAX ULL(39)
+#define TCR_TxSZ_MAX_TTST ULL(48)
+
+#define TCR_T0SZ_SHIFT U(0)
+#define TCR_T1SZ_SHIFT U(16)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB ULL(0x0)
+#define TCR_PS_BITS_64GB ULL(0x1)
+#define TCR_PS_BITS_1TB ULL(0x2)
+#define TCR_PS_BITS_4TB ULL(0x3)
+#define TCR_PS_BITS_16TB ULL(0x4)
+#define TCR_PS_BITS_256TB ULL(0x5)
+
+#define ADDR_MASK_48_TO_63 ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47 ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43 ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41 ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39 ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35 ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_RGN1_INNER_NC (ULL(0x0) << 24)
+#define TCR_RGN1_INNER_WBA (ULL(0x1) << 24)
+#define TCR_RGN1_INNER_WT (ULL(0x2) << 24)
+#define TCR_RGN1_INNER_WBNA (ULL(0x3) << 24)
+
+#define TCR_RGN1_OUTER_NC (ULL(0x0) << 26)
+#define TCR_RGN1_OUTER_WBA (ULL(0x1) << 26)
+#define TCR_RGN1_OUTER_WT (ULL(0x2) << 26)
+#define TCR_RGN1_OUTER_WBNA (ULL(0x3) << 26)
+
+#define TCR_SH1_NON_SHAREABLE (ULL(0x0) << 28)
+#define TCR_SH1_OUTER_SHAREABLE (ULL(0x2) << 28)
+#define TCR_SH1_INNER_SHAREABLE (ULL(0x3) << 28)
+
+#define TCR_TG0_SHIFT U(14)
+#define TCR_TG0_MASK ULL(3)
+#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT U(30)
+#define TCR_TG1_MASK ULL(3)
+#define TCR_TG1_16K (ULL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (ULL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (ULL(3) << TCR_TG1_SHIFT)
+
+#define TCR_EPD0_BIT (ULL(1) << 7)
+#define TCR_EPD1_BIT (ULL(1) << 23)
+
+#define MODE_SP_SHIFT U(0x0)
+#define MODE_SP_MASK U(0x1)
+#define MODE_SP_EL0 U(0x0)
+#define MODE_SP_ELX U(0x1)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_64 U(0x0)
+#define MODE_RW_32 U(0x1)
+
+#define MODE_EL_SHIFT U(0x2)
+#define MODE_EL_MASK U(0x3)
+#define MODE_EL3 U(0x3)
+#define MODE_EL2 U(0x2)
+#define MODE_EL1 U(0x1)
+#define MODE_EL0 U(0x0)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0xf)
+#define MODE32_usr U(0x0)
+#define MODE32_fiq U(0x1)
+#define MODE32_irq U(0x2)
+#define MODE32_svc U(0x3)
+#define MODE32_mon U(0x6)
+#define MODE32_abt U(0x7)
+#define MODE32_hyp U(0xa)
+#define MODE32_und U(0xb)
+#define MODE32_sys U(0xf)
+
+#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif) \
+ ((MODE_RW_64 << MODE_RW_SHIFT) | \
+ (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) | \
+ (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) | \
+ (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ ((MODE_RW_32 << MODE_RW_SHIFT) | \
+ (((mode) & MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
+
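+/*
+ * Example (illustrative): compose an SPSR for an AArch64 entry into EL1,
+ * with SP_EL1 selected and all exceptions masked:
+ *
+ *   uint64_t spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ *                           DISABLE_ALL_EXCEPTIONS);
+ */
+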
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT U(24)
+#define CTR_CWG_MASK U(0xf)
+#define CTR_ERG_SHIFT U(20)
+#define CTR_ERG_MASK U(0xf)
+#define CTR_DMINLINE_SHIFT U(16)
+#define CTR_DMINLINE_MASK U(0xf)
+#define CTR_L1IP_SHIFT U(14)
+#define CTR_L1IP_MASK U(0x3)
+#define CTR_IMINLINE_SHIFT U(0)
+#define CTR_IMINLINE_MASK U(0xf)
+
+#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+
+/*
+ * FPCR definitions
+ */
+#define FPCR_FIZ_BIT (ULL(1) << 0)
+#define FPCR_AH_BIT (ULL(1) << 1)
+#define FPCR_NEP_BIT (ULL(1) << 2)
+
+/* Physical timer control register bit field shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT U(0)
+#define CNTP_CTL_IMASK_SHIFT U(1)
+#define CNTP_CTL_ISTATUS_SHIFT U(2)
+
+#define CNTP_CTL_ENABLE_MASK U(1)
+#define CNTP_CTL_IMASK_MASK U(1)
+#define CNTP_CTL_ISTATUS_MASK U(1)
+
+/* Exception Syndrome register bits and bobs */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define ESR_ISS_SHIFT U(0x0)
+#define ESR_ISS_MASK U(0x1ffffff)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR U(0x5)
+#define EC_AARCH32_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_AARCH32_CP10_MRC U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_AARCH32_SVC U(0x11)
+#define EC_AARCH32_HVC U(0x12)
+#define EC_AARCH32_SMC U(0x13)
+#define EC_AARCH64_SVC U(0x15)
+#define EC_AARCH64_HVC U(0x16)
+#define EC_AARCH64_SMC U(0x17)
+#define EC_AARCH64_SYS U(0x18)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_AARCH32_FP U(0x28)
+#define EC_AARCH64_FP U(0x2c)
+#define EC_SERROR U(0x2f)
+/* Data Fault Status codes (not all error codes are listed) */
+#define ISS_DFSC_MASK U(0x3f)
+#define DFSC_EXT_DABORT U(0x10)
+#define DFSC_GPF_DABORT U(0x28)
+/* ISS encoding for an exception from HVC or SVC instruction execution */
+#define ISS_HVC_SMC_IMM16_MASK U(0xffff)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT U(9)
+
+#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+#define ISS_BITS(x) (((x) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
+
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT U(0x1)
+#define RMR_WARM_RESET_CPU (U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(12)
+#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ U(0x0)
+#define CNTNSAR U(0x4)
+#define CNTNSAR_NS_SHIFT(x) (x)
+
+#define CNTACR_BASE(x) (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT U(0x0)
+#define CNTACR_RVCT_SHIFT U(0x1)
+#define CNTACR_RFRQ_SHIFT U(0x2)
+#define CNTACR_RVOFF_SHIFT U(0x3)
+#define CNTACR_RWVT_SHIFT U(0x4)
+#define CNTACR_RWPT_SHIFT U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL U(0x2c)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL U(0x0)
+#define PMCR_EL0_N_SHIFT U(11)
+#define PMCR_EL0_N_MASK U(0x1f)
+#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT (U(1) << 6)
+#define PMCR_EL0_DP_BIT (U(1) << 5)
+#define PMCR_EL0_X_BIT (U(1) << 4)
+#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
+#define PMCR_EL0_E_BIT (U(1) << 0)
+
+/* PMCNTENSET_EL0 definitions */
+#define PMCNTENSET_EL0_C_BIT (U(1) << 31)
+#define PMCNTENSET_EL0_P_BIT(x) (U(1) << (x))
+
+/* PMEVTYPER<n>_EL0 definitions */
+#define PMEVTYPER_EL0_P_BIT (U(1) << 31)
+#define PMEVTYPER_EL0_U_BIT (U(1) << 30)
+#define PMEVTYPER_EL0_NSK_BIT (U(1) << 29)
+#define PMEVTYPER_EL0_NSU_BIT (U(1) << 28)
+#define PMEVTYPER_EL0_NSH_BIT (U(1) << 27)
+#define PMEVTYPER_EL0_M_BIT (U(1) << 26)
+#define PMEVTYPER_EL0_MT_BIT (U(1) << 25)
+#define PMEVTYPER_EL0_SH_BIT (U(1) << 24)
+#define PMEVTYPER_EL0_T_BIT (U(1) << 23)
+#define PMEVTYPER_EL0_RLK_BIT (U(1) << 22)
+#define PMEVTYPER_EL0_RLU_BIT (U(1) << 21)
+#define PMEVTYPER_EL0_RLH_BIT (U(1) << 20)
+#define PMEVTYPER_EL0_EVTCOUNT_BITS U(0x0000FFFF)
+
+/* PMCCFILTR_EL0 definitions */
+#define PMCCFILTR_EL0_P_BIT (U(1) << 31)
+#define PMCCFILTR_EL0_U_BIT (U(1) << 30)
+#define PMCCFILTR_EL0_NSK_BIT (U(1) << 29)
+#define PMCCFILTR_EL0_NSH_BIT (U(1) << 27)
+#define PMCCFILTR_EL0_M_BIT (U(1) << 26)
+#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
+#define PMCCFILTR_EL0_T_BIT (U(1) << 23)
+#define PMCCFILTR_EL0_RLK_BIT (U(1) << 22)
+#define PMCCFILTR_EL0_RLU_BIT (U(1) << 21)
+#define PMCCFILTR_EL0_RLH_BIT (U(1) << 20)
+
+/* PMSELR_EL0 definitions */
+#define PMSELR_EL0_SEL_SHIFT U(0)
+#define PMSELR_EL0_SEL_MASK U(0x1f)
+
+/* PMU event counter ID definitions */
+#define PMU_EV_PC_WRITE_RETIRED U(0x000C)
+
+/*******************************************************************************
+ * Definitions for system register interface to SVE
+ ******************************************************************************/
+#define ID_AA64ZFR0_EL1 S3_0_C0_C4_4
+
+/* ZCR_EL2 definitions */
+#define ZCR_EL2 S3_4_C1_C2_0
+#define ZCR_EL2_SVE_VL_SHIFT UL(0)
+#define ZCR_EL2_SVE_VL_WIDTH UL(4)
+
+/* ZCR_EL1 definitions */
+#define ZCR_EL1 S3_0_C1_C2_0
+#define ZCR_EL1_SVE_VL_SHIFT UL(0)
+#define ZCR_EL1_SVE_VL_WIDTH UL(4)
+
+/*******************************************************************************
+ * Definitions for system register interface to SME
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SVCR S3_3_C4_C2_2
+#define TPIDR2_EL0 S3_3_C13_C0_5
+#define SMCR_EL2 S3_4_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SVCR definitions */
+#define SVCR_ZA_BIT (U(1) << 1)
+#define SVCR_SM_BIT (U(1) << 0)
+
+/* SMPRI_EL1 definitions */
+#define SMPRI_EL1_PRIORITY_SHIFT U(0)
+#define SMPRI_EL1_PRIORITY_MASK U(0xf)
+
+/* SMPRIMAP_EL2 definitions */
+/* Register is composed of 16 priority map fields of 4 bits numbered 0-15. */
+#define SMPRIMAP_EL2_MAP_SHIFT(pri) U((pri) * 4)
+#define SMPRIMAP_EL2_MAP_MASK U(0xf)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_MASK U(0x1ff)
+#define SMCR_ELX_EZT0_BIT (U(1) << 30)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE ULL(0x0)
+#define MAIR_DEV_nGnRE ULL(0x4)
+#define MAIR_DEV_nGRE ULL(0x8)
+#define MAIR_DEV_GRE ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA ULL(0x1)
+#define MAIR_NORM_WT_TR_RA ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA ULL(0x3)
+#define MAIR_NORM_NC ULL(0x4)
+#define MAIR_NORM_WB_TR_WA ULL(0x5)
+#define MAIR_NORM_WB_TR_RA ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
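+
+/*
+ * Worked example: MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA,
+ * MAIR_NORM_WB_NTR_RWA) evaluates to 0xff, i.e. inner and outer Write-Back,
+ * non-transient, read/write-allocate - the usual attribute for cached
+ * normal memory.
+ */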
+
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT(40) - ULL(1)) /* 40-bit-wide page address */
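+
+/*
+ * Usage sketch (illustrative): after an AT instruction, the output address
+ * can be recovered from PAR_EL1 when the F bit is clear ('pa' being a
+ * hypothetical local variable):
+ *
+ *   u_register_t par = read_par_el1();
+ *   if ((par & PAR_F_MASK) == 0ULL)
+ *           pa = ((par >> PAR_ADDR_SHIFT) & PAR_ADDR_MASK) << PAR_ADDR_SHIFT;
+ */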
+
+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMSCR_EL1 S3_0_C9_C9_0
+#define PMSNEVFR_EL1 S3_0_C9_C9_1
+#define PMSICR_EL1 S3_0_C9_C9_2
+#define PMSIRR_EL1 S3_0_C9_C9_3
+#define PMSFCR_EL1 S3_0_C9_C9_4
+#define PMSEVFR_EL1 S3_0_C9_C9_5
+#define PMSLATFR_EL1 S3_0_C9_C9_6
+#define PMSIDR_EL1 S3_0_C9_C9_7
+#define PMBLIMITR_EL1 S3_0_C9_C10_0
+#define PMBPTR_EL1 S3_0_C9_C10_1
+#define PMBSR_EL1 S3_0_C9_C10_3
+#define PMSCR_EL2 S3_4_C9_C9_0
+
+/*******************************************************************************
+ * Definitions for system register interface to MPAM
+ ******************************************************************************/
+#define MPAMIDR_EL1 S3_0_C10_C4_4
+#define MPAM2_EL2 S3_4_C10_C5_0
+#define MPAMHCR_EL2 S3_4_C10_C4_0
+#define MPAM3_EL3 S3_6_C10_C5_0
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR_EL0 S3_3_C13_C2_0
+#define AMCFGR_EL0 S3_3_C13_C2_1
+#define AMCGCR_EL0 S3_3_C13_C2_2
+#define AMUSERENR_EL0 S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0 S3_3_C13_C2_4
+#define AMCNTENSET0_EL0 S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0 S3_3_C13_C3_0
+#define AMCNTENSET1_EL0 S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0 S3_3_C13_C4_0
+#define AMEVCNTR01_EL0 S3_3_C13_C4_1
+#define AMEVCNTR02_EL0 S3_3_C13_C4_2
+#define AMEVCNTR03_EL0 S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0 S3_3_C13_C6_0
+#define AMEVTYPER01_EL0 S3_3_C13_C6_1
+#define AMEVTYPER02_EL0 S3_3_C13_C6_2
+#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCFGR_EL0 definitions */
+#define AMCFGR_EL0_NCG_SHIFT U(28)
+#define AMCFGR_EL0_NCG_MASK U(0xf)
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
+/* MPAM register definitions */
+#define MPAM3_EL3_MPAMEN_BIT (ULL(1) << 63)
+#define MPAMHCR_EL2_TRAP_MPAMIDR_EL1 (ULL(1) << 31)
+
+#define MPAM2_EL2_TRAPMPAM0EL1 (ULL(1) << 49)
+#define MPAM2_EL2_TRAPMPAM1EL1 (ULL(1) << 48)
+
+#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.6 enhancements
+ ******************************************************************************/
+
+/* Definition for register defining which virtual offsets are implemented. */
+#define AMCG1IDR_EL0 S3_3_C13_C2_6
+#define AMCG1IDR_CTR_MASK ULL(0xffff)
+#define AMCG1IDR_CTR_SHIFT U(0)
+#define AMCG1IDR_VOFF_MASK ULL(0xffff)
+#define AMCG1IDR_VOFF_SHIFT U(16)
+
+/* New bit added to AMCR_EL0 */
+#define AMCR_CG1RZ_BIT (ULL(0x1) << 17)
+
+/* Definitions for virtual offset registers for architected event counters. */
+/* AMEVCNTVOFF01_EL2 intentionally left undefined, as it does not exist. */
+#define AMEVCNTVOFF00_EL2 S3_4_C13_C8_0
+#define AMEVCNTVOFF02_EL2 S3_4_C13_C8_2
+#define AMEVCNTVOFF03_EL2 S3_4_C13_C8_3
+
+/* Definitions for virtual offset registers for auxiliary event counters. */
+#define AMEVCNTVOFF10_EL2 S3_4_C13_C10_0
+#define AMEVCNTVOFF11_EL2 S3_4_C13_C10_1
+#define AMEVCNTVOFF12_EL2 S3_4_C13_C10_2
+#define AMEVCNTVOFF13_EL2 S3_4_C13_C10_3
+#define AMEVCNTVOFF14_EL2 S3_4_C13_C10_4
+#define AMEVCNTVOFF15_EL2 S3_4_C13_C10_5
+#define AMEVCNTVOFF16_EL2 S3_4_C13_C10_6
+#define AMEVCNTVOFF17_EL2 S3_4_C13_C10_7
+#define AMEVCNTVOFF18_EL2 S3_4_C13_C11_0
+#define AMEVCNTVOFF19_EL2 S3_4_C13_C11_1
+#define AMEVCNTVOFF1A_EL2 S3_4_C13_C11_2
+#define AMEVCNTVOFF1B_EL2 S3_4_C13_C11_3
+#define AMEVCNTVOFF1C_EL2 S3_4_C13_C11_4
+#define AMEVCNTVOFF1D_EL2 S3_4_C13_C11_5
+#define AMEVCNTVOFF1E_EL2 S3_4_C13_C11_6
+#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
+
+/*******************************************************************************
+ * RAS system registers
+ ******************************************************************************/
+#define DISR_EL1 S3_0_C12_C1_1
+#define DISR_A_BIT U(31)
+
+#define ERRIDR_EL1 S3_0_C5_C3_0
+#define ERRIDR_MASK U(0xffff)
+
+#define ERRSELR_EL1 S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1 S3_0_C5_C4_0
+#define ERXCTLR_EL1 S3_0_C5_C4_1
+#define ERXSTATUS_EL1 S3_0_C5_C4_2
+#define ERXADDR_EL1 S3_0_C5_C4_3
+#define ERXPFGF_EL1 S3_0_C5_C4_4
+#define ERXPFGCTL_EL1 S3_0_C5_C4_5
+#define ERXPFGCDN_EL1 S3_0_C5_C4_6
+#define ERXMISC0_EL1 S3_0_C5_C5_0
+#define ERXMISC1_EL1 S3_0_C5_C5_1
+
+#define ERXCTLR_ED_BIT (U(1) << 0)
+#define ERXCTLR_UE_BIT (U(1) << 4)
+
+#define ERXPFGCTL_UC_BIT (U(1) << 1)
+#define ERXPFGCTL_UEU_BIT (U(1) << 2)
+#define ERXPFGCTL_CDEN_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Armv8.1 Registers - Privileged Access Never Registers
+ ******************************************************************************/
+#define PAN S3_0_C4_C2_3
+#define PAN_BIT BIT(22)
+
+/*******************************************************************************
+ * Armv8.3 Pointer Authentication Registers
+ ******************************************************************************/
+#define APIAKeyLo_EL1 S3_0_C2_C1_0
+#define APIAKeyHi_EL1 S3_0_C2_C1_1
+#define APIBKeyLo_EL1 S3_0_C2_C1_2
+#define APIBKeyHi_EL1 S3_0_C2_C1_3
+#define APDAKeyLo_EL1 S3_0_C2_C2_0
+#define APDAKeyHi_EL1 S3_0_C2_C2_1
+#define APDBKeyLo_EL1 S3_0_C2_C2_2
+#define APDBKeyHi_EL1 S3_0_C2_C2_3
+#define APGAKeyLo_EL1 S3_0_C2_C3_0
+#define APGAKeyHi_EL1 S3_0_C2_C3_1
+
+/*******************************************************************************
+ * Armv8.4 Data Independent Timing Registers
+ ******************************************************************************/
+#define DIT S3_3_C4_C2_5
+#define DIT_BIT BIT(24)
+
+/*******************************************************************************
+ * Armv8.5 - new MSR encoding to directly access PSTATE.SSBS field
+ ******************************************************************************/
+#define SSBS S3_3_C4_C2_6
+
+/*******************************************************************************
+ * Armv8.5 - Memory Tagging Extension Registers
+ ******************************************************************************/
+#define TFSRE0_EL1 S3_0_C5_C6_1
+#define TFSR_EL1 S3_0_C5_C6_0
+#define RGSR_EL1 S3_0_C1_C0_5
+#define GCR_EL1 S3_0_C1_C0_6
+
+/*******************************************************************************
+ * Armv8.6 - Fine Grained Virtualization Traps Registers
+ ******************************************************************************/
+#define HFGRTR_EL2 S3_4_C1_C1_4
+#define HFGWTR_EL2 S3_4_C1_C1_5
+#define HFGITR_EL2 S3_4_C1_C1_6
+#define HDFGRTR_EL2 S3_4_C3_C1_4
+#define HDFGWTR_EL2 S3_4_C3_C1_5
+
+/*******************************************************************************
+ * Armv8.6 - Enhanced Counter Virtualization Registers
+ ******************************************************************************/
+#define CNTPOFF_EL2 S3_4_C14_C0_6
+
+/*******************************************************************************
+ * Armv9.0 - Trace Buffer Extension System Registers
+ ******************************************************************************/
+#define TRBLIMITR_EL1 S3_0_C9_C11_0
+#define TRBPTR_EL1 S3_0_C9_C11_1
+#define TRBBASER_EL1 S3_0_C9_C11_2
+#define TRBSR_EL1 S3_0_C9_C11_3
+#define TRBMAR_EL1 S3_0_C9_C11_4
+#define TRBTRG_EL1 S3_0_C9_C11_6
+#define TRBIDR_EL1 S3_0_C9_C11_7
+
+/*******************************************************************************
+ * FEAT_BRBE - Branch Record Buffer Extension System Registers
+ ******************************************************************************/
+
+#define BRBCR_EL1 S2_1_C9_C0_0
+#define BRBCR_EL2 S2_4_C9_C0_0
+#define BRBFCR_EL1 S2_1_C9_C0_1
+#define BRBTS_EL1 S2_1_C9_C0_2
+#define BRBINFINJ_EL1 S2_1_C9_C1_0
+#define BRBSRCINJ_EL1 S2_1_C9_C1_1
+#define BRBTGTINJ_EL1 S2_1_C9_C1_2
+#define BRBIDR0_EL1 S2_1_C9_C2_0
+
+/*******************************************************************************
+ * Armv8.4 - Trace Filter System Registers
+ ******************************************************************************/
+#define TRFCR_EL1 S3_0_C1_C2_1
+#define TRFCR_EL2 S3_4_C1_C2_1
+
+/*******************************************************************************
+ * Trace System Registers
+ ******************************************************************************/
+#define TRCAUXCTLR S2_1_C0_C6_0
+#define TRCRSR S2_1_C0_C10_0
+#define TRCCCCTLR S2_1_C0_C14_0
+#define TRCBBCTLR S2_1_C0_C15_0
+#define TRCEXTINSELR0 S2_1_C0_C8_4
+#define TRCEXTINSELR1 S2_1_C0_C9_4
+#define TRCEXTINSELR2 S2_1_C0_C10_4
+#define TRCEXTINSELR3 S2_1_C0_C11_4
+#define TRCCLAIMSET S2_1_C7_C8_6
+#define TRCCLAIMCLR S2_1_C7_C9_6
+#define TRCDEVARCH S2_1_C7_C15_6
+
+/*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_MSCEn_BIT (UL(1) << 11)
+#define HCRX_EL2_MCE2_BIT (UL(1) << 10)
+#define HCRX_EL2_CMOW_BIT (UL(1) << 9)
+#define HCRX_EL2_VFNMI_BIT (UL(1) << 8)
+#define HCRX_EL2_VINMI_BIT (UL(1) << 7)
+#define HCRX_EL2_TALLINT_BIT (UL(1) << 6)
+#define HCRX_EL2_SMPME_BIT (UL(1) << 5)
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+#define HCRX_EL2_INIT_VAL ULL(0x0)
+
+/*******************************************************************************
+ * PFR0_EL1 - Definitions for AArch32 Processor Feature Register 0
+ ******************************************************************************/
+#define ID_PFR0_EL1 S3_0_C0_C1_0
+#define ID_PFR0_EL1_RAS_MASK ULL(0xf)
+#define ID_PFR0_EL1_RAS_SHIFT U(28)
+#define ID_PFR0_EL1_RAS_WIDTH U(4)
+#define ID_PFR0_EL1_RAS_SUPPORTED ULL(0x1)
+#define ID_PFR0_EL1_RASV1P1_SUPPORTED ULL(0x2)
+
+/*******************************************************************************
+ * PFR2_EL1 - Definitions for AArch32 Processor Feature Register 2
+ ******************************************************************************/
+#define ID_PFR2_EL1 S3_0_C0_C3_4
+#define ID_PFR2_EL1_RAS_FRAC_MASK ULL(0xf)
+#define ID_PFR2_EL1_RAS_FRAC_SHIFT U(8)
+#define ID_PFR2_EL1_RAS_FRAC_WIDTH U(4)
+#define ID_PFR2_EL1_RASV1P1_SUPPORTED ULL(0x1)
+
+/*******************************************************************************
+ * FEAT_FGT - Definitions for Fine-Grained Trap registers
+ ******************************************************************************/
+#define HFGITR_EL2_INIT_VAL ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_BRBE_MASK ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_SPECRES_MASK ULL(0x7000000000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_MASK ULL(0x3fc00000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_TLBIOS_MASK ULL(0xf000000)
+#define HFGITR_EL2_FEAT_TLBIOS_MASK ULL(0xfc0000)
+#define HFGITR_EL2_FEAT_PAN2_MASK ULL(0x30000)
+#define HFGITR_EL2_FEAT_DPB2_MASK ULL(0x200)
+#define HFGITR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x78fc03f000fdff)
+
+#define HFGRTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGRTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGRTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGRTR_EL2_FEAT_RAS_MASK ULL(0x27f0000000000)
+#define HFGRTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGRTR_EL2_FEAT_GICV3_MASK ULL(0x800000000)
+#define HFGRTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGRTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGRTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGRTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f3f07fe0f)
+
+#define HFGWTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGWTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGWTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGWTR_EL2_FEAT_RAS_MASK ULL(0x23a0000000000)
+#define HFGWTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGWTR_EL2_FEAT_GICV3_MASK ULL(0x8000000000)
+#define HFGWTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGWTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGWTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGWTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f2903380b)
+
+
+#endif /* ARCH_H */
diff --git a/spm/scmi/include/ext/lib/aarch64/arch_features.h b/spm/scmi/include/ext/lib/aarch64/arch_features.h
new file mode 100644
index 0000000..85f8952
--- /dev/null
+++ b/spm/scmi/include/ext/lib/aarch64/arch_features.h
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_FEATURES_H
+#define ARCH_FEATURES_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <utils_def.h>
+
+static inline bool is_armv7_gentimer_present(void)
+{
+ /* The Generic Timer is always present in an ARMv8-A implementation */
+ return true;
+}
+
+static inline bool is_armv8_1_pan_present(void)
+{
+ u_register_t id_aa64mmfr1_pan =
+ EXTRACT(ID_AA64MMFR1_EL1_PAN, read_id_aa64mmfr1_el1());
+ return (id_aa64mmfr1_pan >= ID_AA64MMFR1_EL1_PAN_SUPPORTED) &&
+ (id_aa64mmfr1_pan <= ID_AA64MMFR1_EL1_PAN3_SUPPORTED);
+}
+
+static inline bool is_armv8_2_pan2_present(void)
+{
+ u_register_t id_aa64mmfr1_pan =
+ EXTRACT(ID_AA64MMFR1_EL1_PAN, read_id_aa64mmfr1_el1());
+ return (id_aa64mmfr1_pan >= ID_AA64MMFR1_EL1_PAN2_SUPPORTED) &&
+ (id_aa64mmfr1_pan <= ID_AA64MMFR1_EL1_PAN3_SUPPORTED);
+}
+
+static inline bool is_armv8_2_sve_present(void)
+{
+ return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT) &
+ ID_AA64PFR0_SVE_MASK) == 1U;
+}
+
+static inline bool is_armv8_2_ttcnp_present(void)
+{
+ return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_CNP_SHIFT) &
+ ID_AA64MMFR2_EL1_CNP_MASK) != 0U;
+}
+
+static inline bool is_feat_pacqarma3_present(void)
+{
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_GPA3_MASK << ID_AA64ISAR2_GPA3_SHIFT) |
+ (ID_AA64ISAR2_APA3_MASK << ID_AA64ISAR2_APA3_SHIFT);
+
+ /* If any of these fields is non-zero, the QARMA3 algorithm is present */
+ return (read_id_aa64isar2_el1() & mask_id_aa64isar2) != 0U;
+}
+
+static inline bool is_armv8_3_pauth_present(void)
+{
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
+ (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) |
+ (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+
+ /*
+ * If any of these fields is non-zero, or QARMA3 is present,
+ * PAuth is present.
+ */
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) != 0U ||
+ is_feat_pacqarma3_present());
+}
+
+static inline bool is_armv8_3_pauth_apa_api_apa3_present(void)
+{
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_APA3_MASK << ID_AA64ISAR2_APA3_SHIFT);
+
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) |
+ (read_id_aa64isar2_el1() & mask_id_aa64isar2)) != 0U;
+}
+
+static inline bool is_armv8_3_pauth_gpa_gpi_gpa3_present(void)
+{
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
+ (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT);
+
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_GPA3_MASK << ID_AA64ISAR2_GPA3_SHIFT);
+
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) |
+ (read_id_aa64isar2_el1() & mask_id_aa64isar2)) != 0U;
+}
+
+static inline bool is_armv8_4_dit_present(void)
+{
+ return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_DIT_SHIFT) &
+ ID_AA64PFR0_DIT_MASK) == 1U;
+}
+
+static inline bool is_armv8_4_ttst_present(void)
+{
+ return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_ST_SHIFT) &
+ ID_AA64MMFR2_EL1_ST_MASK) == 1U;
+}
+
+static inline bool is_armv8_5_bti_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_BT_SHIFT) &
+ ID_AA64PFR1_EL1_BT_MASK) == BTI_IMPLEMENTED;
+}
+
+static inline unsigned int get_armv8_5_mte_support(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_MTE_SHIFT) &
+ ID_AA64PFR1_EL1_MTE_MASK);
+}
+
+static inline bool is_armv8_6_fgt_present(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_FGT_SHIFT) &
+ ID_AA64MMFR0_EL1_FGT_MASK) == ID_AA64MMFR0_EL1_FGT_SUPPORTED;
+}
+
+static inline unsigned long int get_armv8_6_ecv_support(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_ECV_SHIFT) &
+ ID_AA64MMFR0_EL1_ECV_MASK);
+}
+
+static inline unsigned long int get_pa_range(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_PARANGE_SHIFT) &
+ ID_AA64MMFR0_EL1_PARANGE_MASK);
+}
+
+static inline uint32_t arch_get_debug_version(void)
+{
+ return ((read_id_aa64dfr0_el1() & ID_AA64DFR0_DEBUG_BITS) >>
+ ID_AA64DFR0_DEBUG_SHIFT);
+}
+
+static inline bool get_armv9_0_trbe_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEBUFFER_SHIFT) &
+ ID_AA64DFR0_TRACEBUFFER_MASK) ==
+ ID_AA64DFR0_TRACEBUFFER_SUPPORTED;
+}
+
+static inline bool get_armv8_4_trf_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT) &
+ ID_AA64DFR0_TRACEFILT_MASK) ==
+ ID_AA64DFR0_TRACEFILT_SUPPORTED;
+}
+
+static inline bool get_armv8_0_sys_reg_trace_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEVER_SHIFT) &
+ ID_AA64DFR0_TRACEVER_MASK) ==
+ ID_AA64DFR0_TRACEVER_SUPPORTED;
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+ /*
+ * Return the RME version, or zero if not supported. The return value
+ * can therefore be used both as an integer version number and, by
+ * comparing it against zero, to detect the presence of RME.
+ */
+ return (unsigned int)(read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
+
+static inline bool get_feat_hcx_support(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_HCX_SHIFT) &
+ ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
+}
+
+static inline bool get_feat_afp_present(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_AFP_SHIFT) &
+ ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_SUPPORTED);
+}
+
+static inline bool get_feat_brbe_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_BRBE_SHIFT) &
+ ID_AA64DFR0_BRBE_MASK) ==
+ ID_AA64DFR0_BRBE_SUPPORTED;
+}
+
+static inline bool get_feat_wfxt_present(void)
+{
+ return (((read_id_aa64isar2_el1() >> ID_AA64ISAR2_WFXT_SHIFT) &
+ ID_AA64ISAR2_WFXT_MASK) == ID_AA64ISAR2_WFXT_SUPPORTED);
+}
+
+static inline bool is_feat_rng_trap_present(void)
+{
+ return (((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT) &
+ ID_AA64PFR1_EL1_RNDR_TRAP_MASK)
+ == ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED);
+}
+
+static inline unsigned int spe_get_version(void)
+{
+ return (unsigned int)((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
+ ID_AA64DFR0_PMS_MASK);
+}
+
+static inline bool get_feat_pmuv3_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMUVER_SHIFT) &
+ ID_AA64DFR0_PMUVER_MASK) != ID_AA64DFR0_PMUVER_NOT_SUPPORTED);
+}
+
+static inline bool get_feat_hpmn0_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_HPMN0_SHIFT) &
+ ID_AA64DFR0_HPMN0_MASK) == ID_AA64DFR0_HPMN0_SUPPORTED);
+}
+
+static inline bool is_feat_sme_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
+ return (features & ID_AA64PFR1_EL1_SME_MASK) >= ID_AA64PFR1_EL1_SME_SUPPORTED;
+}
+
+static inline bool is_feat_sme_fa64_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64smfr0_el1();
+ return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U;
+}
+
+static inline bool is_feat_sme2_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
+ return (features & ID_AA64PFR1_EL1_SME_MASK) >= ID_AA64PFR1_EL1_SME2_SUPPORTED;
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran4(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN4, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran4_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN4_2, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran16(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN16, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran16_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN16_2, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran64(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN64, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran64_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN64_2, read_id_aa64mmfr0_el1());
+}
+
+static inline bool is_feat_52b_on_4k_supported(void)
+{
+ return (get_id_aa64mmfr0_el0_tgran4() ==
+ ID_AA64MMFR0_EL1_TGRAN4_52B_SUPPORTED);
+}
+
+static inline bool is_feat_52b_on_4k_2_supported(void)
+{
+ u_register_t tgran4_2 = get_id_aa64mmfr0_el0_tgran4_2();
+
+ return ((tgran4_2 == ID_AA64MMFR0_EL1_TGRAN4_2_52B_SUPPORTED) ||
+ ((tgran4_2 == ID_AA64MMFR0_EL1_TGRAN4_2_AS_1)
+ && is_feat_52b_on_4k_supported()));
+}
+
+static inline bool is_feat_specres_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_SPECRES, read_id_aa64isar1_el1())
+ == ID_AA64ISAR1_SPECRES_SUPPORTED;
+}
+
+static inline bool is_feat_tlbirange_present(void)
+{
+ return EXTRACT(ID_AA64ISAR0_TLB, read_id_aa64isar0_el1())
+ == ID_AA64ISAR0_TLBIRANGE_SUPPORTED;
+}
+
+static inline bool is_feat_tlbios_present(void)
+{
+ return EXTRACT(ID_AA64ISAR0_TLB, read_id_aa64isar0_el1())
+ != ID_AA64ISAR0_TLB_NOT_SUPPORTED;
+}
+
+static inline bool is_feat_dpb_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_DPB, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_DPB_SUPPORTED;
+}
+
+static inline bool is_feat_dpb2_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_DPB, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_DPB2_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_v_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_V_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_accdata_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_ACCDATA_SUPPORTED;
+}
+
+static inline bool is_feat_ras_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_RAS, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_RAS_SUPPORTED;
+}
+
+static inline bool is_feat_rasv1p1_present(void)
+{
+ return (EXTRACT(ID_AA64PFR0_RAS, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_RASV1P1_SUPPORTED)
+ || (is_feat_ras_present() &&
+ (EXTRACT(ID_AA64PFR1_RAS_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_RASV1P1_SUPPORTED))
+ || (EXTRACT(ID_PFR0_EL1_RAS, read_id_pfr0_el1())
+ == ID_PFR0_EL1_RASV1P1_SUPPORTED)
+ || ((EXTRACT(ID_PFR0_EL1_RAS, read_id_pfr0_el1())
+ == ID_PFR0_EL1_RAS_SUPPORTED) &&
+ (EXTRACT(ID_PFR2_EL1_RAS_FRAC, read_id_pfr2_el1())
+ == ID_PFR2_EL1_RASV1P1_SUPPORTED));
+}
+
+static inline bool is_feat_gicv3_gicv4_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_GIC, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_GICV3_GICV4_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_CSV2, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_CSV2_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_2_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_CSV2, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_CSV2_2_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_1p1_present(void)
+{
+ return is_feat_csv2_present() &&
+ (EXTRACT(ID_AA64PFR1_CSV2_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_CSV2_1P1_SUPPORTED);
+}
+
+static inline bool is_feat_csv2_1p2_present(void)
+{
+ return is_feat_csv2_present() &&
+ (EXTRACT(ID_AA64PFR1_CSV2_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_CSV2_1P2_SUPPORTED);
+}
+
+static inline bool is_feat_lor_present(void)
+{
+ return EXTRACT(ID_AA64MMFR1_EL1_LO, read_id_aa64mmfr1_el1())
+ != ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED;
+}
+
+#endif /* ARCH_FEATURES_H */
diff --git a/spm/scmi/include/ext/lib/aarch64/arch_helpers.h b/spm/scmi/include/ext/lib/aarch64/arch_helpers.h
new file mode 100644
index 0000000..39461d5
--- /dev/null
+++ b/spm/scmi/include/ext/lib/aarch64/arch_helpers.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <arch.h>
+#include <cdefs.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \
+ return v; \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \
+}
+
+#define SYSREG_WRITE_CONST(reg_name, v) \
+ __asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
+
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
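+
+/*
+ * For illustration, DEFINE_SYSREG_READ_FUNC(midr_el1) expands to:
+ *
+ *   static inline u_register_t read_midr_el1(void)
+ *   {
+ *           u_register_t v;
+ *           __asm__ volatile ("mrs %0, midr_el1" : "=r" (v));
+ *           return v;
+ *   }
+ */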
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+static inline void _op(void) \
+{ \
+ __asm__ (#_op); \
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+static inline void _op ## _type(void) \
+{ \
+ __asm__ (#_op " " #_type); \
+}
+
+/* Define function for a system instruction that takes a register parameter and no type specifier */
+#define DEFINE_SYSOP_PARAM_FUNC(_op) \
+static inline void _op(uint64_t v) \
+{ \
+ __asm__ (#_op " " "%0" : : "r" (v)); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+static inline void _op ## _type(uint64_t v) \
+{ \
+ __asm__ (#_op " " #_type ", %0" : : "r" (v)); \
+}
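+
+/*
+ * For illustration, DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac) defines
+ * dccvac(uint64_t v), which emits "dc cvac, %0" to clean the data cache
+ * line holding the address in v to the point of coherency.
+ */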
+
+/*******************************************************************************
+ * TLB maintenance accessor prototypes
+ ******************************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for erratum 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type)\
+static inline void tlbi ## _type(void) \
+{ \
+ __asm__("tlbi " #_type "\n" \
+ "dsb ish\n" \
+ "tlbi " #_type); \
+}
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for erratum 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type) \
+static inline void tlbi ## _type(uint64_t v) \
+{ \
+ __asm__("tlbi " #_type ", %0\n" \
+ "dsb ish\n" \
+ "tlbi " #_type ", %0" : : "r" (v)); \
+}
+#endif /* ERRATA_A57_813419 */
+
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
+#else
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+#endif
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
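+
+/*
+ * Usage sketch (illustrative): TLB maintenance is typically followed by
+ * barriers, e.g. invalidating all stage 1 EL1&0 entries on this core:
+ *
+ *   tlbivmalle1();
+ *   dsbish();
+ *   isb();
+ */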
+
+/*******************************************************************************
+ * Cache maintenance accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+/*******************************************************************************
+ * Address translation accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e3r)
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu(void);
+void disable_mmu_icache(void);
+
+/*******************************************************************************
+ * Misc. accessor prototypes
+ ******************************************************************************/
+
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
+
+DEFINE_SYSREG_RW_FUNCS(par_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_afr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(nzcv)
+DEFINE_SYSREG_READ_FUNC(spsel)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(spsr_el3)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el3)
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_FUNC(sevl)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_FUNC(isb)
+
+DEFINE_SYSOP_PARAM_FUNC(wfit)
+DEFINE_SYSOP_PARAM_FUNC(wfet)
+
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+ * accesses to an area used by an interrupt handler, on the assumption
+ * that this is safe because interrupts are disabled at that point
+ * (according to program order). However, non-volatile accesses are
+ * not necessarily kept in program order relative to volatile inline
+ * assembly statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_DBG_BIT);
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_DBG_BIT);
+ isb();
+}
+
+void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(scr_el3)
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+DEFINE_SYSREG_RW_FUNCS(vbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+DEFINE_SYSREG_RW_FUNCS(actlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+DEFINE_SYSREG_RW_FUNCS(esr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
+
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+DEFINE_SYSREG_RW_FUNCS(far_el3)
+
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+DEFINE_SYSREG_RW_FUNCS(mair_el3)
+
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+DEFINE_SYSREG_RW_FUNCS(amair_el3)
+
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+DEFINE_SYSREG_READ_FUNC(rvbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+DEFINE_SYSREG_RW_FUNCS(rmr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+DEFINE_SYSREG_RW_FUNCS(tcr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+DEFINE_SYSREG_RW_FUNCS(cptr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_READ_FUNC(cntvct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+
+#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
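+
+/*
+ * Usage sketch (illustrative): arming the EL0 physical timer for roughly
+ * one second with the accessors and macros above:
+ *
+ *   u_register_t ctl = 0;
+ *
+ *   write_cntp_tval_el0(read_cntfrq_el0());
+ *   set_cntp_ctl_enable(ctl);
+ *   write_cntp_ctl_el0(ctl);
+ */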
+
+#define read_midr() read_midr_el1()
+
+DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
+DEFINE_SYSREG_READ_FUNC(isr_el1)
+
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
+DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmcntenclr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmcntenset_el0)
+DEFINE_SYSREG_RW_FUNCS(pmccntr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmccfiltr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmevtyper0_el0)
+DEFINE_SYSREG_RW_FUNCS(pmevcntr0_el0)
+DEFINE_SYSREG_RW_FUNCS(pmovsclr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmovsset_el0)
+DEFINE_SYSREG_RW_FUNCS(pmselr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmuserenr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevtyper_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevcntr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmintenclr_el1)
+DEFINE_SYSREG_RW_FUNCS(pmintenset_el1)
+
+/* Parameterised event counter accessors */
+static inline u_register_t read_pmevcntrn_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevcntr_el0();
+}
+
+static inline void write_pmevcntrn_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevcntr_el0(val);
+}
+
+static inline u_register_t read_pmevtypern_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevtyper_el0();
+}
+
+static inline void write_pmevtypern_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevtyper_el0(val);
+}
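+
+/*
+ * Usage sketch (illustrative): counting retired PC writes on event counter
+ * 0 through the PMSELR_EL0 indirection above:
+ *
+ *   write_pmevtypern_el0(0, PMU_EV_PC_WRITE_RETIRED);
+ *   write_pmevcntrn_el0(0, 0);
+ *   write_pmcntenset_el0(PMCNTENSET_EL0_P_BIT(0));
+ */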
+
+/* Armv8.5 FEAT_RNG Registers */
+DEFINE_SYSREG_READ_FUNC(rndr)
+DEFINE_SYSREG_READ_FUNC(rndrrs)
+
+/* GICv3 System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_ctrl_el1, ICV_CTRL_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icv_iar1_el1, ICV_IAR1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_igrpen1_el1, ICV_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icv_eoir1_el1, ICV_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_pmr_el1, ICV_PMR_EL1)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcr_el0, AMCR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcg1idr_el0, AMCG1IDR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+
+/* Static profiling control registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmscr_el1, PMSCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsevfr_el1, PMSEVFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsfcr_el1, PMSFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsicr_el1, PMSICR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsidr_el1, PMSIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsirr_el1, PMSIRR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmslatfr_el1, PMSLATFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsnevfr_el1, PMSNEVFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmbptr_el1, PMBPTR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmbsr_el1, PMBSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmscr_el2, PMSCR_EL2)
+
+/* Definitions for system register interface to SVE */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64zfr0_el1, ID_AA64ZFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(zcr_el2, ZCR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(zcr_el1, ZCR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(svcr, SVCR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(tpidr2_el0, TPIDR2_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el2, SMCR_EL2)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
+/* Armv8.1 Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(pan, PAN)
+
+/* Armv8.2 Registers */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
+
+/* Armv8.3 Pointer Authentication Registers */
+/* Instruction keys A and B */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeyhi_el1, APIAKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeylo_el1, APIAKeyLo_EL1)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(apibkeyhi_el1, APIBKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apibkeylo_el1, APIBKeyLo_EL1)
+
+/* Data keys A and B */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apdakeyhi_el1, APDAKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apdakeylo_el1, APDAKeyLo_EL1)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(apdbkeyhi_el1, APDBKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apdbkeylo_el1, APDBKeyLo_EL1)
+
+/* Generic key */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apgakeyhi_el1, APGAKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apgakeylo_el1, APGAKeyLo_EL1)
+
+/* MTE registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(tfsre0_el1, TFSRE0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(tfsr_el1, TFSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(rgsr_el1, RGSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gcr_el1, GCR_EL1)
+
+/* Armv8.4 Data Independent Timing */
+DEFINE_RENAME_SYSREG_RW_FUNCS(dit, DIT)
+
+/* Armv8.6 Fine Grained Virtualization Traps Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgrtr_el2, HFGRTR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgwtr_el2, HFGWTR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hfgitr_el2, HFGITR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgrtr_el2, HDFGRTR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgwtr_el2, HDFGWTR_EL2)
+
+/* Armv8.6 Enhanced Counter Virtualization Register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(cntpoff_el2, CNTPOFF_EL2)
+
+/* Armv9.0 Trace buffer extension System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trblimitr_el1, TRBLIMITR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbptr_el1, TRBPTR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbbaser_el1, TRBBASER_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbsr_el1, TRBSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbmar_el1, TRBMAR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbtrg_el1, TRBTRG_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(trbidr_el1, TRBIDR_EL1)
+
+/* FEAT_BRBE Branch record buffer extension system registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbcr_el1, BRBCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbcr_el2, BRBCR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbfcr_el1, BRBFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbts_el1, BRBTS_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbinfinj_el1, BRBINFINJ_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbsrcinj_el1, BRBSRCINJ_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbtgtinj_el1, BRBTGTINJ_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(brbidr0_el1, BRBIDR0_EL1)
+
+/* Armv8.4 Trace filter control System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trfcr_el1, TRFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trfcr_el2, TRFCR_EL2)
+
+/* Trace System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcauxctlr, TRCAUXCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcrsr, TRCRSR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcbbctlr, TRCBBCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcccctlr, TRCCCCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr0, TRCEXTINSELR0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr1, TRCEXTINSELR1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr2, TRCEXTINSELR2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr3, TRCEXTINSELR3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcclaimset, TRCCLAIMSET)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcclaimclr, TRCCLAIMCLR)
+DEFINE_RENAME_SYSREG_READ_FUNC(trcdevarch, TRCDEVARCH)
+
+/* FEAT_HCX HCRX_EL2 */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
+
+/* Control floating point behaviour */
+DEFINE_RENAME_SYSREG_RW_FUNCS(fpcr, FPCR)
+
+/* ID_AA64ISAR2_EL1 */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64isar2_el1, ID_AA64ISAR2_EL1)
+
+/* ID_PFR2_EL1 */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_pfr2_el1, ID_PFR2_EL1)
+
+#define IS_IN_EL(x) \
+ (GET_EL(read_CurrentEl()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL2() IS_IN_EL(2)
+#define IS_IN_EL3() IS_IN_EL(3)
+
+static inline unsigned int get_current_el(void)
+{
+ return GET_EL(read_CurrentEl());
+}
+
+/*
+ * Check if an EL is implemented from AA64PFR0 register fields.
+ */
+static inline uint64_t el_implemented(unsigned int el)
+{
+ if (el > 3U) {
+ return EL_IMPL_NONE;
+ } else {
+ unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
+
+ return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
+ }
+}
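+
+/*
+ * Usage sketch (illustrative):
+ *
+ *   if (el_implemented(2U) != EL_IMPL_NONE) {
+ *           ... EL2 registers can safely be accessed here ...
+ *   }
+ */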
+
+/* Read the count value of the system counter. */
+static inline uint64_t syscounter_read(void)
+{
+ /*
+ * The instruction barrier is needed to guarantee that we read an
+ * accurate value. Otherwise, the CPU might speculatively read it and
+ * return a stale value.
+ */
+ isb();
+ return read_cntpct_el0();
+}
+
+/* Read the value of the Counter-timer virtual count. */
+static inline uint64_t virtualcounter_read(void)
+{
+ /*
+ * The instruction barrier is needed to guarantee that we read an
+ * accurate value. Otherwise, the CPU might speculatively read it and
+ * return a stale value.
+ */
+ isb();
+ return read_cntvct_el0();
+}
+
+#endif /* ARCH_HELPERS_H */
diff --git a/spm/scmi/include/ext/lib/aarch64/serror.h b/spm/scmi/include/ext/lib/aarch64/serror.h
new file mode 100644
index 0000000..ac25f87
--- /dev/null
+++ b/spm/scmi/include/ext/lib/aarch64/serror.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SERROR_H__
+#define __SERROR_H__
+
+typedef bool (*exception_handler_t)(void);
+void register_custom_serror_handler(exception_handler_t handler);
+void unregister_custom_serror_handler(void);
+
+#endif /* __SERROR_H__ */
diff --git a/spm/scmi/include/ext/lib/aarch64/sync.h b/spm/scmi/include/ext/lib/aarch64/sync.h
new file mode 100644
index 0000000..5058980
--- /dev/null
+++ b/spm/scmi/include/ext/lib/aarch64/sync.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SYNC_H__
+#define __SYNC_H__
+
+typedef bool (*exception_handler_t)(void);
+void register_custom_sync_exception_handler(exception_handler_t handler);
+void unregister_custom_sync_exception_handler(void);
+
+#endif /* __SYNC_H__ */
diff --git a/spm/scmi/include/ext/lib/cassert.h b/spm/scmi/include/ext/lib/cassert.h
new file mode 100644
index 0000000..8844e8b
--- /dev/null
+++ b/spm/scmi/include/ext/lib/cassert.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CASSERT_H__
+#define __CASSERT_H__
+
+/*******************************************************************************
+ * Macro to flag a compile time assertion. It expands to a typedef of a
+ * negatively-sized array if 'cond' evaluates to false.
+ * The following compilation error is triggered if the assertion fails:
+ * "error: size of array 'msg' is negative"
+ ******************************************************************************/
+#define CASSERT(cond, msg) typedef char msg[(cond) ? 1 : -1]
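+
+/*
+ * Usage example:
+ *   CASSERT(sizeof(uint64_t) == 8, assert_uint64_size);
+ * compiles cleanly, whereas a false condition triggers the error quoted
+ * above.
+ */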
+
+#endif /* __CASSERT_H__ */
diff --git a/spm/scmi/include/ext/lib/events.h b/spm/scmi/include/ext/lib/events.h
new file mode 100644
index 0000000..34c5b2f
--- /dev/null
+++ b/spm/scmi/include/ext/lib/events.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __EVENTS_H__
+#define __EVENTS_H__
+
+#include <spinlock.h>
+
+typedef struct {
+ /*
+ * Counter that keeps track of the minimum number of recipients of the
+ * event. When the event is sent, this counter is incremented. When it
+ * is received, it is decremented. Therefore, a zero value means that
+ * the event hasn't been sent yet, or that all recipients have already
+ * received it.
+ *
+ * Volatile is needed as it enforces ordering relative to accesses
+ * to the lock.
+ */
+ volatile unsigned int cnt;
+
+ /* Lock used to avoid concurrent accesses to the counter */
+ spinlock_t lock;
+} event_t;
+
+/*
+ * Initialise an event.
+ * event: Address of the event to initialise
+ *
+ * This function can be used either to initialise a newly created event
+ * structure or to recycle one.
+ *
+ * Note: This function is not MP-safe. It can't use the event lock as it is
+ * responsible for initialising it. Care must be taken to ensure this function
+ * is called in the right circumstances.
+ */
+void tftf_init_event(event_t *event);
+
+/*
+ * Send an event to a CPU.
+ * event: Address of the variable that acts as a synchronisation object.
+ *
+ * Which CPU receives the event is determined on a first-come, first-served
+ * basis. If several CPUs are waiting for the same event then the first CPU
+ * which takes the event will reflect that in the event structure.
+ *
+ * Note: This is equivalent to calling:
+ * tftf_send_event_to(event, 1);
+ */
+void tftf_send_event(event_t *event);
+
+/*
+ * Send an event to all CPUs.
+ * event: Address of the variable that acts as a synchronisation object.
+ *
+ * Note: This is equivalent to calling:
+ * tftf_send_event_to(event, PLATFORM_CORE_COUNT);
+ */
+void tftf_send_event_to_all(event_t *event);
+
+/*
+ * Send an event to a given number of CPUs.
+ * event: Address of the variable that acts as a synchronisation object.
+ * cpus_count: Number of CPUs to send the event to.
+ *
+ * Which CPUs receive the event is determined on a first-come, first-served
+ * basis. If more than 'cpus_count' CPUs are waiting for the same event then the
+ * first 'cpus_count' CPUs which take the event will reflect that in the event
+ * structure.
+ */
+void tftf_send_event_to(event_t *event, unsigned int cpus_count);
+
+/*
+ * Wait for an event.
+ * event: Address of the variable that acts as a synchronisation object.
+ */
+void tftf_wait_for_event(event_t *event);
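+
+/*
+ * Usage sketch (illustrative, with hypothetical CPU roles): a primary CPU
+ * signals the end of initialisation to all other CPUs:
+ *
+ *   static event_t init_done;
+ *
+ *   Primary:     tftf_init_event(&init_done);
+ *                ... initialise ...
+ *                tftf_send_event_to_all(&init_done);
+ *
+ *   Secondaries: tftf_wait_for_event(&init_done);
+ */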
+
+#endif /* __EVENTS_H__ */
diff --git a/spm/scmi/include/ext/lib/extensions/amu.h b/spm/scmi/include/ext/lib/extensions/amu.h
new file mode 100644
index 0000000..d5950ca
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/amu.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AMU_H
+#define AMU_H
+
+#include <stdint.h>
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <utils_def.h>
+
+#define AMU_GROUP0_COUNTERS_MASK U(0xf)
+#define AMU_GROUP0_NR_COUNTERS U(4)
+
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK U(0)
+#endif
+
+/* Calculate number of group 1 counters */
+#if (AMU_GROUP1_COUNTERS_MASK & (1 << 15))
+#define AMU_GROUP1_NR_COUNTERS 16U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 14))
+#define AMU_GROUP1_NR_COUNTERS 15U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 13))
+#define AMU_GROUP1_NR_COUNTERS 14U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 12))
+#define AMU_GROUP1_NR_COUNTERS 13U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 11))
+#define AMU_GROUP1_NR_COUNTERS 12U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 10))
+#define AMU_GROUP1_NR_COUNTERS 11U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 9))
+#define AMU_GROUP1_NR_COUNTERS 10U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 8))
+#define AMU_GROUP1_NR_COUNTERS 9U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 7))
+#define AMU_GROUP1_NR_COUNTERS 8U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 6))
+#define AMU_GROUP1_NR_COUNTERS 7U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 5))
+#define AMU_GROUP1_NR_COUNTERS 6U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 4))
+#define AMU_GROUP1_NR_COUNTERS 5U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 3))
+#define AMU_GROUP1_NR_COUNTERS 4U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 2))
+#define AMU_GROUP1_NR_COUNTERS 3U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 1))
+#define AMU_GROUP1_NR_COUNTERS 2U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 0))
+#define AMU_GROUP1_NR_COUNTERS 1U
+#else
+#define AMU_GROUP1_NR_COUNTERS 0U
+#endif
+
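+/*
+ * Worked example of the chain above (for illustration): a platform defining
+ * PLAT_AMU_GROUP1_COUNTERS_MASK as 0x3f (counters 0-5 implemented) gets
+ * AMU_GROUP1_NR_COUNTERS == 6U, i.e. the index of the most significant set
+ * bit plus one.
+ */
+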
+CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
+
+unsigned int amu_get_version(void);
+
+uint64_t amu_group0_cnt_read(unsigned int idx);
+#if __aarch64__
+uint64_t amu_group0_voffset_read(unsigned int idx);
+void amu_group0_voffset_write(unsigned int idx, uint64_t val);
+#endif
+
+#if AMU_GROUP1_NR_COUNTERS
+uint64_t amu_group1_cnt_read(unsigned int idx);
+#if __aarch64__
+uint64_t amu_group1_voffset_read(unsigned int idx);
+void amu_group1_voffset_write(unsigned int idx, uint64_t val);
+#endif
+#endif
+
+#endif /* AMU_H */
diff --git a/spm/scmi/include/ext/lib/extensions/amu_private.h b/spm/scmi/include/ext/lib/extensions/amu_private.h
new file mode 100644
index 0000000..7ae17d9
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/amu_private.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AMU_PRIVATE_H
+#define AMU_PRIVATE_H
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(unsigned int idx);
+uint64_t amu_group1_cnt_read_internal(unsigned int idx);
+
+#if __aarch64__
+uint64_t amu_group0_voffset_read_internal(unsigned int idx);
+void amu_group0_voffset_write_internal(unsigned int idx, uint64_t val);
+
+uint64_t amu_group1_voffset_read_internal(unsigned int idx);
+void amu_group1_voffset_write_internal(unsigned int idx, uint64_t val);
+#endif
+
+#endif /* AMU_PRIVATE_H */
diff --git a/spm/scmi/include/ext/lib/extensions/fpu.h b/spm/scmi/include/ext/lib/extensions/fpu.h
new file mode 100644
index 0000000..d7b4f99
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/fpu.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FPU_H
+#define FPU_H
+
+/* The FPU and SIMD register bank is 32 quadword (128 bits) Q registers. */
+#define FPU_Q_SIZE 16U
+#define FPU_Q_COUNT 32U
+
+/* These defines are needed by assembly code to access FPU registers. */
+#define FPU_OFFSET_Q 0U
+#define FPU_OFFSET_FPSR (FPU_Q_SIZE * FPU_Q_COUNT)
+#define FPU_OFFSET_FPCR (FPU_OFFSET_FPSR + 8)
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stdint.h>
+
+typedef struct fpu_reg_state {
+ uint8_t q[FPU_Q_COUNT][FPU_Q_SIZE];
+ unsigned long fpsr;
+ unsigned long fpcr;
+} fpu_reg_state_t __aligned(16);
+
+/*
+ * Read the FPU state registers and compare them with the template values
+ * provided in the parameter.
+ */
+bool fpu_state_compare_template(fpu_reg_state_t *fpu);
+
+/*
+ * Fill the template with random values and copy it to the
+ * FPU state registers (SIMD vectors, FPCR, FPSR).
+ */
+void fpu_state_fill_regs_and_template(fpu_reg_state_t *fpu);
+
+/*
+ * Populate the provided FPU structure with the template value regs_val for
+ * all 32 FPU/SIMD registers, and for the status registers FPCR/FPSR.
+ */
+void fpu_state_set(fpu_reg_state_t *vec,
+ uint8_t regs_val);
+
+/*
+ * Print the contents of the provided FPU structure.
+ */
+void fpu_state_print(fpu_reg_state_t *vec);
+
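+/*
+ * Illustrative sketch (hypothetical test flow): detect FPU state corruption
+ * across an operation by filling the registers, running the operation, then
+ * comparing against the template.
+ *
+ *   static fpu_reg_state_t fpu_temp;
+ *
+ *   fpu_state_fill_regs_and_template(&fpu_temp);
+ *   // ... run the operation that must preserve the FPU state ...
+ *   if (!fpu_state_compare_template(&fpu_temp)) {
+ *           fpu_state_print(&fpu_temp);   // report the mismatch
+ *   }
+ */
+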
+#endif /* __ASSEMBLER__ */
+#endif /* FPU_H */
diff --git a/spm/scmi/include/ext/lib/extensions/pauth.h b/spm/scmi/include/ext/lib/extensions/pauth.h
new file mode 100644
index 0000000..c8d577f
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/pauth.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PAUTH_H
+#define PAUTH_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __aarch64__
+/* Initialize 128-bit ARMv8.3-PAuth key */
+uint128_t init_apkey(void);
+
+/* Program APIAKey_EL1 key and enable ARMv8.3-PAuth */
+void pauth_init_enable(void);
+
+/* Disable ARMv8.3-PAuth */
+void pauth_disable(void);
+
+/*
+ * Fill the PAuth keys and the template with random values if the keys were
+ * not initialized earlier, else copy the PAuth key registers to the template.
+ */
+void pauth_test_lib_fill_regs_and_template(void);
+
+/* Read the PAuth registers and compare them with the template values. */
+bool pauth_test_lib_compare_template(void);
+
+/* Read the PAuth registers and store them in the template. */
+void pauth_test_lib_read_keys(void);
+
+/* Test PAuth instructions. */
+void pauth_test_lib_test_intrs(void);
+
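+/*
+ * Illustrative sequence (hypothetical test flow): initialise the keys,
+ * snapshot them, run the scenario under test, then check for corruption.
+ *
+ *   pauth_init_enable();
+ *   pauth_test_lib_fill_regs_and_template();
+ *   // ... run the operation that must preserve the PAuth keys ...
+ *   if (!pauth_test_lib_compare_template()) {
+ *           // the keys were clobbered
+ *   }
+ */
+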
+#endif /* __aarch64__ */
+
+#endif /* PAUTH_H */
diff --git a/spm/scmi/include/ext/lib/extensions/sme.h b/spm/scmi/include/ext/lib/extensions/sme.h
new file mode 100644
index 0000000..c89e630
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/sme.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SME_H
+#define SME_H
+
+#include <stdint.h>
+
+#define MAX_VL (512)
+#define MAX_VL_B (MAX_VL / 8)
+#define SME_SMCR_LEN_MAX U(0x1FF)
+
+typedef enum {
+	SMSTART,	/* enters streaming SVE mode and enables the SME ZA array */
+	SMSTART_SM,	/* enters streaming SVE mode only */
+	SMSTART_ZA,	/* enables SME ZA array storage only */
+} smestart_instruction_type_t;
+
+typedef enum {
+	SMSTOP,		/* exits streaming SVE mode and disables the SME ZA array */
+	SMSTOP_SM,	/* exits streaming SVE mode only */
+	SMSTOP_ZA,	/* disables SME ZA array storage only */
+} smestop_instruction_type_t;
+
+/* SME feature related prototypes. */
+void sme_enable(void);
+void sme_smstart(smestart_instruction_type_t smstart_type);
+void sme_smstop(smestop_instruction_type_t smstop_type);
+
+/* SME2 feature related prototypes. */
+void sme2_enable(void);
+
+/* Assembly function prototypes. */
+uint64_t sme_rdvl_1(void);
+void sme_try_illegal_instruction(void);
+void sme_vector_to_ZA(const uint64_t *input_vector);
+void sme_ZA_to_vector(const uint64_t *output_vector);
+void sme2_load_zt0_instruction(const uint64_t *inputbuf);
+void sme2_store_zt0_instruction(const uint64_t *outputbuf);
+
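+/*
+ * Illustrative sketch (hypothetical caller): enter streaming SVE mode, query
+ * the streaming vector length, then leave streaming mode.
+ *
+ *   sme_enable();
+ *   sme_smstart(SMSTART_SM);
+ *   uint64_t svl = sme_rdvl_1();   // streaming vector length in bytes
+ *   sme_smstop(SMSTOP_SM);
+ */
+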
+#endif /* SME_H */
diff --git a/spm/scmi/include/ext/lib/extensions/sve.h b/spm/scmi/include/ext/lib/extensions/sve.h
new file mode 100644
index 0000000..60432a5
--- /dev/null
+++ b/spm/scmi/include/ext/lib/extensions/sve.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SVE_H
+#define SVE_H
+
+#include <arch.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h> /* for rand() */
+
+#define fill_sve_helper(num) "ldr z"#num", [%0, #"#num", MUL VL];"
+#define read_sve_helper(num) "str z"#num", [%0, #"#num", MUL VL];"
+
+/*
+ * Max. vector length permitted by the architecture:
+ * SVE: 2048 bits = 256 bytes
+ */
+#define SVE_VECTOR_LEN_BYTES 256
+#define SVE_NUM_VECTORS 32
+
+#define SVE_VQ_ARCH_MIN (0U)
+#define SVE_VQ_ARCH_MAX ((1 << ZCR_EL2_SVE_VL_WIDTH) - 1)
+
+/* convert SVE VL in bytes to VQ */
+#define SVE_VL_TO_VQ(vl_bytes) (((vl_bytes) >> 4U) - 1)
+
+/* convert SVE VQ to bits */
+#define SVE_VQ_TO_BITS(vq) (((vq) + 1U) << 7U)
+
+/* convert SVE VQ to bytes */
+#define SVE_VQ_TO_BYTES(vq) (SVE_VQ_TO_BITS(vq) / 8)
+
+/* Get a random SVE VQ between 0 and SVE_VQ_ARCH_MAX */
+#define SVE_GET_RANDOM_VQ (rand() % (SVE_VQ_ARCH_MAX + 1))
+
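+/*
+ * Worked example of the conversions above: a 256-byte VL gives
+ * SVE_VL_TO_VQ(256) == 15, and SVE_VQ_TO_BITS(15) == 2048 bits, i.e.
+ * SVE_VQ_TO_BYTES(15) == 256 again.
+ */
+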
+#ifndef __ASSEMBLY__
+
+typedef uint8_t sve_vector_t[SVE_VECTOR_LEN_BYTES];
+
+void sve_config_vq(uint8_t sve_vq);
+uint32_t sve_probe_vl(uint8_t sve_max_vq);
+void sve_fill_vector_regs(const sve_vector_t v[SVE_NUM_VECTORS]);
+void sve_read_vector_regs(sve_vector_t v[SVE_NUM_VECTORS]);
+
+/* Assembly routines */
+bool sve_subtract_arrays_interleaved(int *dst_array, int *src_array1,
+ int *src_array2, int array_size,
+ bool (*world_switch_cb)(void));
+
+void sve_subtract_arrays(int *dst_array, int *src_array1, int *src_array2,
+ int array_size);
+
+#ifdef __aarch64__
+
+/* Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN) */
+static inline uint64_t sve_vector_length_get(void)
+{
+ uint64_t vl;
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ "rdvl %0, #1;"
+ ".arch_extension nosve\n"
+ : "=r" (vl)
+ );
+
+ return vl;
+}
+
+#endif /* __aarch64__ */
+#endif /* __ASSEMBLY__ */
+#endif /* SVE_H */
diff --git a/spm/scmi/include/ext/lib/heap/page_alloc.h b/spm/scmi/include/ext/lib/heap/page_alloc.h
new file mode 100644
index 0000000..7580b78
--- /dev/null
+++ b/spm/scmi/include/ext/lib/heap/page_alloc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PAGE_ALLOC_H
+#define PAGE_ALLOC_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define HEAP_NULL_PTR 0U
+#define HEAP_INVALID_LEN (-1)
+#define HEAP_OUT_OF_RANGE (-2)
+#define HEAP_INIT_FAILED (-3)
+#define HEAP_INIT_SUCCESS 0
+
+/*
+ * Initialize the memory heap space to be used.
+ * @heap_base: heap base address
+ * @heap_len: heap size, in bytes
+ */
+int page_pool_init(uint64_t heap_base, uint64_t heap_len);
+
+/*
+ * Allocate pages and return a pointer to them.
+ * @bytes_size: size of the allocation, in bytes
+ */
+void *page_alloc(u_register_t bytes_size);
+
+/*
+ * Reset heap memory usage cursor to heap base address
+ */
+void page_pool_reset(void);
+void page_free(u_register_t ptr);
+
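+/*
+ * Illustrative usage (hypothetical base address and size): carve a pool out
+ * of a reserved region, then hand out page-sized chunks.
+ *
+ *   if (page_pool_init(0x80000000ULL, 0x100000ULL) == HEAP_INIT_SUCCESS) {
+ *           void *buf = page_alloc(4096);   // one 4KB page
+ *           // ... use buf ...
+ *           page_pool_reset();              // reclaim the whole pool
+ *   }
+ */
+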
+#endif /* PAGE_ALLOC_H */
diff --git a/spm/scmi/include/ext/lib/io_storage.h b/spm/scmi/include/ext/lib/io_storage.h
new file mode 100644
index 0000000..42cebd3
--- /dev/null
+++ b/spm/scmi/include/ext/lib/io_storage.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IO_H__
+#define __IO_H__
+
+#include <stdint.h>
+#include <stdio.h> /* For ssize_t */
+#include <uuid.h>
+
+/* Device type which can be used to enable policy decisions about which device
+ * to access */
+typedef enum {
+ IO_TYPE_INVALID,
+ IO_TYPE_FLASH,
+ IO_TYPE_MEMMAP,
+ IO_TYPE_FIRMWARE_IMAGE_PACKAGE,
+ IO_TYPE_MAX
+} io_type_t;
+
+
+/* Modes used when seeking data on a supported device */
+typedef enum {
+ IO_SEEK_INVALID,
+ IO_SEEK_SET,
+ IO_SEEK_END,
+ IO_SEEK_CUR,
+ IO_SEEK_MAX
+} io_seek_mode_t;
+
+
+/* Connector type, providing a means of identifying a device to open */
+struct io_dev_connector;
+
+
+/* File specification - used to refer to data on a device supporting file-like
+ * entities */
+typedef struct io_file_spec {
+ const char *path;
+ unsigned int mode;
+} io_file_spec_t;
+
+/* UUID specification - used to refer to data accessed using UUIDs (i.e. FIP
+ * images) */
+typedef struct io_uuid_spec {
+ const uuid_t uuid;
+} io_uuid_spec_t;
+
+
+/* Block specification - used to refer to data on a device supporting
+ * block-like entities */
+typedef struct io_block_spec {
+ size_t offset;
+ size_t length;
+} io_block_spec_t;
+
+
+/* Access modes used when accessing data on a device */
+#define IO_MODE_INVALID (0)
+#define IO_MODE_RO (1 << 0)
+#define IO_MODE_RW (1 << 1)
+
+
+/* Return codes reported by 'io_*' APIs */
+#define IO_SUCCESS (0)
+#define IO_FAIL (-1)
+#define IO_NOT_SUPPORTED (-2)
+#define IO_RESOURCES_EXHAUSTED (-3)
+
+
+/* Open a connection to a device */
+int io_dev_open(const struct io_dev_connector *dev_con,
+ const uintptr_t dev_spec,
+ uintptr_t *dev_handle);
+
+
+/* Initialise a device explicitly - to permit lazy initialisation or
+ * re-initialisation */
+int io_dev_init(uintptr_t dev_handle, const uintptr_t init_params);
+
+/* TODO: Consider whether an explicit "shutdown" API should be included */
+
+/* Close a connection to a device */
+int io_dev_close(uintptr_t dev_handle);
+
+
+/* Synchronous operations */
+int io_open(uintptr_t dev_handle, const uintptr_t spec, uintptr_t *handle);
+
+int io_seek(uintptr_t handle, io_seek_mode_t mode, ssize_t offset);
+
+int io_size(uintptr_t handle, size_t *length);
+
+int io_read(uintptr_t handle, uintptr_t buffer, size_t length,
+ size_t *length_read);
+
+int io_write(uintptr_t handle, const uintptr_t buffer, size_t length,
+ size_t *length_written);
+
+int io_close(uintptr_t handle);
+
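+/*
+ * Illustrative call flow ('con', 'spec', 'buf' and 'len' are placeholders):
+ * open a device, then read an entity from it.
+ *
+ *   uintptr_t dev, handle;
+ *   size_t bytes_read;
+ *
+ *   if ((io_dev_open(con, (uintptr_t)NULL, &dev) == IO_SUCCESS) &&
+ *       (io_open(dev, spec, &handle) == IO_SUCCESS)) {
+ *           io_seek(handle, IO_SEEK_SET, 0);
+ *           io_read(handle, (uintptr_t)buf, len, &bytes_read);
+ *           io_close(handle);
+ *           io_dev_close(dev);
+ *   }
+ */
+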
+
+#endif /* __IO_H__ */
diff --git a/spm/scmi/include/ext/lib/irq.h b/spm/scmi/include/ext/lib/irq.h
new file mode 100644
index 0000000..6a37e05
--- /dev/null
+++ b/spm/scmi/include/ext/lib/irq.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __IRQ_H__
+#define __IRQ_H__
+
+#include <cdefs.h>
+#include <platform_def.h> /* For CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * SGI sent by the timer management framework to notify CPUs when the system
+ * timer fires
+ */
+#define IRQ_WAKE_SGI IRQ_NS_SGI_7
+
+#ifndef __ASSEMBLY__
+
+/* Prototype of a handler function for an IRQ */
+typedef int (*irq_handler_t)(void *data);
+
+/* Keep track of the IRQ handler registered for a given SPI */
+typedef struct {
+ irq_handler_t handler;
+} spi_desc;
+
+/* Keep track of the IRQ handler registered for a spurious interrupt */
+typedef irq_handler_t spurious_desc;
+
+/*
+ * PPIs and SGIs are interrupts that are private to a GIC CPU interface. These
+ * interrupts are banked in the GIC Distributor. Therefore, each CPU can
+ * set up a different IRQ handler for a given PPI/SGI.
+ *
+ * So we define a data structure representing an IRQ handler aligned on the
+ * size of a cache line. This guarantees that in an array of these, each element
+ * is loaded in a separate cache line. This allows efficient concurrent
+ * manipulation of these elements on different CPUs.
+ */
+typedef struct {
+ irq_handler_t handler;
+} __aligned(CACHE_WRITEBACK_GRANULE) irq_handler_banked_t;
+
+typedef irq_handler_banked_t ppi_desc;
+typedef irq_handler_banked_t sgi_desc;
+
+void tftf_irq_setup(void);
+
+/*
+ * Generic handler called upon reception of an IRQ.
+ *
+ * This function acknowledges the interrupt, calls the user-defined handler
+ * if one has been registered then marks the processing of the interrupt as
+ * complete.
+ */
+int tftf_irq_handler_dispatcher(void);
+
+/*
+ * Enable interrupt #irq_num for the calling core.
+ */
+void tftf_irq_enable(unsigned int irq_num, uint8_t irq_priority);
+
+/*
+ * Disable interrupt #irq_num for the calling core.
+ */
+void tftf_irq_disable(unsigned int irq_num);
+
+/*
+ * Register an interrupt handler for a given interrupt number.
+ * Will fail if there is already an interrupt handler registered for the same
+ * interrupt.
+ *
+ * Return 0 on success, a negative value otherwise.
+ */
+int tftf_irq_register_handler(unsigned int num, irq_handler_t irq_handler);
+
+/*
+ * Unregister an interrupt handler for a given interrupt number.
+ * Will fail if there is no interrupt handler registered for that interrupt.
+ *
+ * Return 0 on success, a negative value otherwise.
+ */
+int tftf_irq_unregister_handler(unsigned int irq_num);
+
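+/*
+ * Illustrative usage (hypothetical handler and IRQ number):
+ *
+ *   static int my_handler(void *data)   // placeholder handler
+ *   {
+ *           return 0;
+ *   }
+ *
+ *   tftf_irq_register_handler(42, my_handler);
+ *   tftf_irq_enable(42, 0x80);          // example priority value
+ *   // ... wait for / trigger the interrupt ...
+ *   tftf_irq_disable(42);
+ *   tftf_irq_unregister_handler(42);
+ */
+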
+#endif /* __ASSEMBLY__ */
+
+#endif /* __IRQ_H__ */
diff --git a/spm/scmi/include/ext/lib/libc/aarch32/endian_.h b/spm/scmi/include/ext/lib/libc/aarch32/endian_.h
new file mode 100644
index 0000000..0cf2c75
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch32/endian_.h
@@ -0,0 +1,146 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN__H
+#define ENDIAN__H
+
+#include <stdint.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#ifdef __ARMEB__
+#define _BYTE_ORDER _BIG_ENDIAN
+#else
+#define _BYTE_ORDER _LITTLE_ENDIAN
+#endif /* __ARMEB__ */
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#ifdef __ARMEB__
+#define _QUAD_HIGHWORD 0
+#define _QUAD_LOWWORD 1
+#define __ntohl(x) ((uint32_t)(x))
+#define __ntohs(x) ((uint16_t)(x))
+#define __htonl(x) ((uint32_t)(x))
+#define __htons(x) ((uint16_t)(x))
+#else
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+#endif /* __ARMEB__ */
+
+static __inline uint64_t
+__bswap64(uint64_t _x)
+{
+
+ return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+ ((_x >> 8) & 0xff000000) | ((_x << 8) & ((uint64_t)0xff << 32)) |
+ ((_x << 24) & ((uint64_t)0xff << 40)) |
+ ((_x << 40) & ((uint64_t)0xff << 48)) | ((_x << 56)));
+}
+
+static __inline uint32_t
+__bswap32_var(uint32_t v)
+{
+ uint32_t t1;
+
+ __asm __volatile("eor %1, %0, %0, ror #16\n"
+ "bic %1, %1, #0x00ff0000\n"
+ "mov %0, %0, ror #8\n"
+ "eor %0, %0, %1, lsr #8\n"
+ : "+r" (v), "=r" (t1));
+
+ return (v);
+}
+
+static __inline uint16_t
+__bswap16_var(uint16_t v)
+{
+ uint32_t ret = v & 0xffff;
+
+ __asm __volatile(
+ "mov %0, %0, ror #8\n"
+ "orr %0, %0, %0, lsr #16\n"
+ "bic %0, %0, %0, lsl #16"
+ : "+r" (ret));
+
+ return ((uint16_t)ret);
+}
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant(x) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant(x) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* ENDIAN__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch32/limits_.h b/spm/scmi/include/ext/lib/libc/aarch32/limits_.h
new file mode 100644
index 0000000..26cec17
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch32/limits_.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define SCHAR_MAX 0x7F
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#define CHAR_MAX 0x7F
+#define CHAR_MIN (-CHAR_MAX - 1)
+#define UCHAR_MAX 0xFFU
+#define SHRT_MAX 0x7FFF
+#define SHRT_MIN (-SHRT_MAX - 1)
+#define USHRT_MAX 0xFFFFU
+#define INT_MAX 0x7FFFFFFF
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX 0xFFFFFFFFU
+#define LONG_MAX 0x7FFFFFFFL
+#define LONG_MIN (-LONG_MAX - 1L)
+#define ULONG_MAX 0xFFFFFFFFUL
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
+#define LLONG_MIN (-LLONG_MAX - 1LL)
+#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
+
+#define __LONG_BIT 32
+#define __WORD_BIT 32
diff --git a/spm/scmi/include/ext/lib/libc/aarch32/stddef_.h b/spm/scmi/include/ext/lib/libc/aarch32/stddef_.h
new file mode 100644
index 0000000..36dc20b
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch32/stddef_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDDEF__H
+#define STDDEF__H
+
+#ifndef SIZET_
+typedef unsigned int size_t;
+#define SIZET_
+#endif
+
+#endif /* STDDEF__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch32/stdio_.h b/spm/scmi/include/ext/lib/libc/aarch32/stdio_.h
new file mode 100644
index 0000000..5e49425
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch32/stdio_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDIO__H
+#define STDIO__H
+
+#ifndef SSIZET_
+typedef int ssize_t;
+#define SSIZET_
+#endif
+
+#endif /* STDIO__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch64/endian_.h b/spm/scmi/include/ext/lib/libc/aarch64/endian_.h
new file mode 100644
index 0000000..7c79fd4
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch64/endian_.h
@@ -0,0 +1,128 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN__H
+#define ENDIAN__H
+
+#include <stdint.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#define _BYTE_ORDER _LITTLE_ENDIAN
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+
+static __inline uint64_t
+__bswap64(uint64_t x)
+{
+ uint64_t ret;
+
+ __asm __volatile("rev %0, %1\n"
+ : "=&r" (ret), "+r" (x));
+
+ return (ret);
+}
+
+static __inline uint32_t
+__bswap32_var(uint32_t v)
+{
+ uint32_t ret;
+
+ __asm __volatile("rev32 %x0, %x1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return (ret);
+}
+
+static __inline uint16_t
+__bswap16_var(uint16_t v)
+{
+ uint32_t ret;
+
+ __asm __volatile("rev16 %w0, %w1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return ((uint16_t)ret);
+}
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant((uint16_t)(x)) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant((uint32_t)(x)) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* ENDIAN__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch64/limits_.h b/spm/scmi/include/ext/lib/libc/aarch64/limits_.h
new file mode 100644
index 0000000..e36cfe7
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch64/limits_.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define SCHAR_MAX 0x7F
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#define CHAR_MAX 0x7F
+#define CHAR_MIN (-CHAR_MAX - 1)
+#define UCHAR_MAX 0xFFU
+#define SHRT_MAX 0x7FFF
+#define SHRT_MIN (-SHRT_MAX - 1)
+#define USHRT_MAX 0xFFFFU
+#define INT_MAX 0x7FFFFFFF
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX 0xFFFFFFFFU
+#define LONG_MAX 0x7FFFFFFFFFFFFFFFL
+#define LONG_MIN (-LONG_MAX - 1L)
+#define ULONG_MAX 0xFFFFFFFFFFFFFFFFUL
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
+#define LLONG_MIN (-LLONG_MAX - 1LL)
+#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
diff --git a/spm/scmi/include/ext/lib/libc/aarch64/setjmp_.h b/spm/scmi/include/ext/lib/libc/aarch64/setjmp_.h
new file mode 100644
index 0000000..174b3eb
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch64/setjmp_.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SETJMP__H
+#define SETJMP__H
+
+#define JMP_CTX_X19 0x0
+#define JMP_CTX_X21 0x10
+#define JMP_CTX_X23 0x20
+#define JMP_CTX_X25 0x30
+#define JMP_CTX_X27 0x40
+#define JMP_CTX_X29 0x50
+#define JMP_CTX_SP 0x60
+#define JMP_CTX_END 0x70 /* Aligned to 16 bytes */
+
+#define JMP_SIZE (JMP_CTX_END >> 3)
+
+#ifndef __ASSEMBLY__
+
+#include <cdefs.h>
+#include <stdint.h>
+
+/* Jump buffer hosting the x19 - x30 and sp_el0 registers */
+typedef uint64_t jmp_buf[JMP_SIZE] __aligned(16);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* SETJMP__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch64/stddef_.h b/spm/scmi/include/ext/lib/libc/aarch64/stddef_.h
new file mode 100644
index 0000000..6ecc606
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch64/stddef_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDDEF__H
+#define STDDEF__H
+
+#ifndef SIZET_
+typedef unsigned long size_t;
+#define SIZET_
+#endif
+
+#endif /* STDDEF__H */
diff --git a/spm/scmi/include/ext/lib/libc/aarch64/stdio_.h b/spm/scmi/include/ext/lib/libc/aarch64/stdio_.h
new file mode 100644
index 0000000..afaeadc
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/aarch64/stdio_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDIO__H
+#define STDIO__H
+
+#ifndef SSIZET_
+typedef long ssize_t;
+#define SSIZET_
+#endif
+
+#endif /* STDIO__H */
diff --git a/spm/scmi/include/ext/lib/libc/assert.h b/spm/scmi/include/ext/lib/libc/assert.h
new file mode 100644
index 0000000..ce631e3
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/assert.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ASSERT_H
+#define ASSERT_H
+
+#include <cdefs.h>
+
+#include <common/debug.h>
+
+#if ENABLE_ASSERTIONS
+#define assert(e) ((e) ? (void)0 : __assert(__FILE__, __LINE__, #e))
+#else
+#define assert(e) ((void)0)
+#endif /* ENABLE_ASSERTIONS */
+
+__dead2 void __assert(const char *file, unsigned int line,
+ const char *assertion);
+
+#endif /* ASSERT_H */
diff --git a/spm/scmi/include/ext/lib/libc/cdefs.h b/spm/scmi/include/ext/lib/libc/cdefs.h
new file mode 100644
index 0000000..c3dd6f1
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/cdefs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDEFS_H
+#define CDEFS_H
+
+#define __dead2 __attribute__((__noreturn__))
+#define __deprecated __attribute__((__deprecated__))
+#define __packed __attribute__((__packed__))
+#define __used __attribute__((__used__))
+#define __unused __attribute__((__unused__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+/*
+ * For compatibility with TF-A codebase.
+ */
+#define __init
+
+
+#define __printflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+
+#define __weak_reference(sym, alias) \
+	__asm__(".weak " __STRING(alias)); \
+	__asm__(".equ " __STRING(alias) ", " __STRING(sym))
+
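+/*
+ * Example (illustrative): make 'plat_setup' a weak alias for
+ * 'default_setup', so a platform can override it with a strong definition.
+ *
+ *   __weak_reference(default_setup, plat_setup);
+ */
+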
+#define __STRING(x) #x
+#define __XSTRING(x) __STRING(x)
+
+#endif /* CDEFS_H */
diff --git a/spm/scmi/include/ext/lib/libc/endian.h b/spm/scmi/include/ext/lib/libc/endian.h
new file mode 100644
index 0000000..4100f57
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/endian.h
@@ -0,0 +1,191 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2002 Thomas Moestl <tmm@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN_H
+#define ENDIAN_H
+
+#include <cdefs.h>
+#include <stdint.h>
+#include <endian_.h>
+
+/*
+ * General byte order swapping functions.
+ */
+#define bswap16(x) __bswap16(x)
+#define bswap32(x) __bswap32(x)
+#define bswap64(x) __bswap64(x)
+
+/*
+ * Host to big endian, host to little endian, big endian to host, and little
+ * endian to host byte order functions as detailed in byteorder(9).
+ */
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define htobe16(x) bswap16((x))
+#define htobe32(x) bswap32((x))
+#define htobe64(x) bswap64((x))
+#define htole16(x) ((uint16_t)(x))
+#define htole32(x) ((uint32_t)(x))
+#define htole64(x) ((uint64_t)(x))
+
+#define be16toh(x) bswap16((x))
+#define be32toh(x) bswap32((x))
+#define be64toh(x) bswap64((x))
+#define le16toh(x) ((uint16_t)(x))
+#define le32toh(x) ((uint32_t)(x))
+#define le64toh(x) ((uint64_t)(x))
+#else /* _BYTE_ORDER != _LITTLE_ENDIAN */
+#define htobe16(x) ((uint16_t)(x))
+#define htobe32(x) ((uint32_t)(x))
+#define htobe64(x) ((uint64_t)(x))
+#define htole16(x) bswap16((x))
+#define htole32(x) bswap32((x))
+#define htole64(x) bswap64((x))
+
+#define be16toh(x) ((uint16_t)(x))
+#define be32toh(x) ((uint32_t)(x))
+#define be64toh(x) ((uint64_t)(x))
+#define le16toh(x) bswap16((x))
+#define le32toh(x) bswap32((x))
+#define le64toh(x) bswap64((x))
+#endif /* _BYTE_ORDER == _LITTLE_ENDIAN */
+
+/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
+
+static __inline uint16_t
+be16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[0] << 8) | p[1]);
+}
+
+static __inline uint32_t
+be32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
+}
+
+static __inline uint64_t
+be64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
+}
+
+static __inline uint16_t
+le16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[1] << 8) | p[0]);
+}
+
+static __inline uint32_t
+le32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+}
+
+static __inline uint64_t
+le64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
+}
+
+static __inline void
+be16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 8) & 0xff;
+ p[1] = u & 0xff;
+}
+
+static __inline void
+be32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 24) & 0xff;
+ p[1] = (u >> 16) & 0xff;
+ p[2] = (u >> 8) & 0xff;
+ p[3] = u & 0xff;
+}
+
+static __inline void
+be64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ be32enc(p, (uint32_t)(u >> 32));
+ be32enc(p + 4, (uint32_t)(u & 0xffffffffU));
+}
+
+static __inline void
+le16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+}
+
+static __inline void
+le32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+ p[2] = (u >> 16) & 0xff;
+ p[3] = (u >> 24) & 0xff;
+}
+
+static __inline void
+le64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ le32enc(p, (uint32_t)(u & 0xffffffffU));
+ le32enc(p + 4, (uint32_t)(u >> 32));
+}
+
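+/*
+ * Illustrative round-trip: encode a value into a byte stream in big-endian
+ * order and decode it back, independent of host byte order and alignment.
+ *
+ *   uint8_t raw[4];
+ *
+ *   be32enc(raw, 0x12345678U);   // raw = { 0x12, 0x34, 0x56, 0x78 }
+ *   assert(be32dec(raw) == 0x12345678U);
+ */
+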
+#endif /* ENDIAN_H */
diff --git a/spm/scmi/include/ext/lib/libc/errno.h b/spm/scmi/include/ext/lib/libc/errno.h
new file mode 100644
index 0000000..029912f
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/errno.h
@@ -0,0 +1,169 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)errno.h 8.5 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ERRNO_H
+#define ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* Input/output error */
+#define ENXIO 6 /* Device not configured */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file descriptor */
+#define ECHILD 10 /* No child processes */
+#define EDEADLK 11 /* Resource deadlock avoided */
+ /* 11 was EAGAIN */
+#define ENOMEM 12 /* Cannot allocate memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* Operation not supported by device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* Too many open files in system */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Inappropriate ioctl for device */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only filesystem */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+
+/* math software */
+#define EDOM 33 /* Numerical argument out of domain */
+#define ERANGE 34 /* Result too large */
+
+/* non-blocking and interrupt i/o */
+#define EAGAIN 35 /* Resource temporarily unavailable */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+
+/* ipc/network software -- argument errors */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported */
+#define ENOTSUP EOPNOTSUPP /* Operation not supported */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Can't assign requested address */
+
+/* ipc/network software -- operational errors */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection on reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Socket is already connected */
+#define ENOTCONN 57 /* Socket is not connected */
+#define ESHUTDOWN 58 /* Can't send after socket shutdown */
+#define ETOOMANYREFS 59 /* Too many references: can't splice */
+#define ETIMEDOUT 60 /* Operation timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+
+#define ELOOP 62 /* Too many levels of symbolic links */
+#define ENAMETOOLONG 63 /* File name too long */
+
+/* should be rearranged */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+/* quotas & mush */
+#define EPROCLIM 67 /* Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Disc quota exceeded */
+
+/* Network File System */
+#define ESTALE 70 /* Stale NFS file handle */
+#define EREMOTE 71 /* Too many levels of remote in path */
+#define EBADRPC 72 /* RPC struct is bad */
+#define ERPCMISMATCH 73 /* RPC version wrong */
+#define EPROGUNAVAIL 74 /* RPC prog. not avail */
+#define EPROGMISMATCH 75 /* Program version wrong */
+#define EPROCUNAVAIL 76 /* Bad procedure for program */
+
+#define ENOLCK 77 /* No locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#define EFTYPE 79 /* Inappropriate file type or format */
+#define EAUTH 80 /* Authentication error */
+#define ENEEDAUTH 81 /* Need authenticator */
+#define EIDRM 82 /* Identifier removed */
+#define ENOMSG 83 /* No message of desired type */
+#define EOVERFLOW 84 /* Value too large to be stored in data type */
+#define ECANCELED 85 /* Operation canceled */
+#define EILSEQ 86 /* Illegal byte sequence */
+#define ENOATTR 87 /* Attribute not found */
+
+#define EDOOFUS 88 /* Programming error */
+
+#define EBADMSG 89 /* Bad message */
+#define EMULTIHOP 90 /* Multihop attempted */
+#define ENOLINK 91 /* Link has been severed */
+#define EPROTO 92 /* Protocol error */
+
+#define ENOTCAPABLE 93 /* Capabilities insufficient */
+#define ECAPMODE 94 /* Not permitted in capability mode */
+#define ENOTRECOVERABLE 95 /* State not recoverable */
+#define EOWNERDEAD 96 /* Previous owner died */
+
+#define ELAST 96 /* Must be equal largest errno */
+
+#endif /* ERRNO_H */
diff --git a/spm/scmi/include/ext/lib/libc/limits.h b/spm/scmi/include/ext/lib/libc/limits.h
new file mode 100644
index 0000000..41bb658
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/limits.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef LIMITS_H
+#define LIMITS_H
+
+#include <limits_.h>
+
+#define CHAR_BIT 8
+#define MB_LEN_MAX 1
+
+#endif /* LIMITS_H */
diff --git a/spm/scmi/include/ext/lib/libc/setjmp.h b/spm/scmi/include/ext/lib/libc/setjmp.h
new file mode 100644
index 0000000..5661201
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/setjmp.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SETJMP_H
+#define SETJMP_H
+
+#include <setjmp_.h>
+
+#ifndef __ASSEMBLY__
+
+#include <cdefs.h>
+
+int setjmp(jmp_buf env);
+__dead2 void longjmp(jmp_buf env, int val);
+
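+/*
+ * Illustrative control flow: the direct setjmp() call returns 0; a later
+ * longjmp() transfers control back and makes setjmp() return 'val'.
+ *
+ *   static jmp_buf env;
+ *
+ *   if (setjmp(env) == 0) {
+ *           // first pass: run code that may bail out
+ *           longjmp(env, 1);   // never returns
+ *   } else {
+ *           // resumed here, with setjmp() returning 1
+ *   }
+ */
+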
+#endif /* __ASSEMBLY__ */
+#endif /* SETJMP_H */
diff --git a/spm/scmi/include/ext/lib/libc/stdarg.h b/spm/scmi/include/ext/lib/libc/stdarg.h
new file mode 100644
index 0000000..e260b9b
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stdarg.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDARG_H
+#define STDARG_H
+
+#define va_list __builtin_va_list
+#define va_start(ap, last) __builtin_va_start(ap, last)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_copy(to, from) __builtin_va_copy(to, from)
+#define va_arg(to, type) __builtin_va_arg(to, type)
+
+#endif /* STDARG_H */
diff --git a/spm/scmi/include/ext/lib/libc/stdbool.h b/spm/scmi/include/ext/lib/libc/stdbool.h
new file mode 100644
index 0000000..e39aef7
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stdbool.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDBOOL_H
+#define STDBOOL_H
+
+#define bool _Bool
+
+#define true 1
+#define false 0
+
+#define __bool_true_false_are_defined 1
+
+#endif /* STDBOOL_H */
diff --git a/spm/scmi/include/ext/lib/libc/stddef.h b/spm/scmi/include/ext/lib/libc/stddef.h
new file mode 100644
index 0000000..58a519e
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stddef.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDDEF_H
+#define STDDEF_H
+
+#include <stddef_.h>
+
+#ifndef _PTRDIFF_T
+typedef long ptrdiff_t;
+#define _PTRDIFF_T
+#endif
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define offsetof(st, m) __builtin_offsetof(st, m)
+
+#endif /* STDDEF_H */
diff --git a/spm/scmi/include/ext/lib/libc/stdint.h b/spm/scmi/include/ext/lib/libc/stdint.h
new file mode 100644
index 0000000..818870e
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stdint.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDINT_H
+#define STDINT_H
+
+#include <limits.h>
+
+#define INT8_MAX CHAR_MAX
+#define INT8_MIN CHAR_MIN
+#define UINT8_MAX UCHAR_MAX
+
+#define INT16_MAX SHRT_MAX
+#define INT16_MIN SHRT_MIN
+#define UINT16_MAX USHRT_MAX
+
+#define INT32_MAX INT_MAX
+#define INT32_MIN INT_MIN
+#define UINT32_MAX UINT_MAX
+
+#define INT64_MAX LLONG_MAX
+#define INT64_MIN LLONG_MIN
+#define UINT64_MAX ULLONG_MAX
+
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST8_MAX INT32_MAX
+#define UINT_FAST8_MAX UINT32_MAX
+
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST16_MAX INT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+#define INTPTR_MIN LONG_MIN
+#define INTPTR_MAX LONG_MAX
+#define UINTPTR_MAX ULONG_MAX
+
+#define INTMAX_MIN LLONG_MIN
+#define INTMAX_MAX LLONG_MAX
+#define UINTMAX_MAX ULLONG_MAX
+
+#define PTRDIFF_MIN LONG_MIN
+#define PTRDIFF_MAX LONG_MAX
+
+#define SIZE_MAX ULONG_MAX
+
+#define INT8_C(x) x
+#define INT16_C(x) x
+#define INT32_C(x) x
+#define INT64_C(x) x ## LL
+
+#define UINT8_C(x) x
+#define UINT16_C(x) x
+#define UINT32_C(x) x ## U
+#define UINT64_C(x) x ## ULL
+
+#define INTMAX_C(x) x ## LL
+#define UINTMAX_C(x) x ## ULL
+
+typedef signed char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+typedef signed char int8_least_t;
+typedef short int16_least_t;
+typedef int int32_least_t;
+typedef long long int64_least_t;
+
+typedef unsigned char uint8_least_t;
+typedef unsigned short uint16_least_t;
+typedef unsigned int uint32_least_t;
+typedef unsigned long long uint64_least_t;
+
+typedef int int8_fast_t;
+typedef int int16_fast_t;
+typedef int int32_fast_t;
+typedef long long int64_fast_t;
+
+typedef unsigned int uint8_fast_t;
+typedef unsigned int uint16_fast_t;
+typedef unsigned int uint32_fast_t;
+typedef unsigned long long uint64_fast_t;
+
+typedef long intptr_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * Conceptually, these are supposed to be the largest integers representable
+ * in C, but GCC and Clang define them as long long for compatibility.
+ */
+typedef long long intmax_t;
+typedef unsigned long long uintmax_t;
+
+typedef long register_t;
+typedef unsigned long u_register_t;
+
+#ifdef __aarch64__
+typedef __int128 int128_t;
+typedef unsigned __int128 uint128_t;
+#endif /* __aarch64__ */
+
+#endif /* STDINT_H */
diff --git a/spm/scmi/include/ext/lib/libc/stdio.h b/spm/scmi/include/ext/lib/libc/stdio.h
new file mode 100644
index 0000000..1da175c
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stdio.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDIO_H
+#define STDIO_H
+
+#include <cdefs.h>
+#include <stddef.h>
+#include <stdio_.h>
+
+#define EOF -1
+
+int printf(const char *fmt, ...) __printflike(1, 2);
+int snprintf(char *s, size_t n, const char *fmt, ...) __printflike(3, 4);
+
+#ifdef STDARG_H
+int vprintf(const char *fmt, va_list args) __printflike(1, 0);
+int vsnprintf(char *str, size_t size, const char *format, va_list ap) __printflike(3, 0);
+#endif
+
+int putchar(int c);
+int puts(const char *s);
+
+#endif /* STDIO_H */
diff --git a/spm/scmi/include/ext/lib/libc/stdlib.h b/spm/scmi/include/ext/lib/libc/stdlib.h
new file mode 100644
index 0000000..69eab9e
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/stdlib.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDLIB_H
+#define STDLIB_H
+
+#include <stddef.h>
+
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+
+#define _ATEXIT_MAX 1
+
+#define RAND_MAX 0x7ffffffd
+
+extern void abort(void);
+extern int atexit(void (*func)(void));
+extern void exit(int status);
+
+int rand(void);
+void srand(unsigned int seed);
+
+#endif /* STDLIB_H */
diff --git a/spm/scmi/include/ext/lib/libc/string.h b/spm/scmi/include/ext/lib/libc/string.h
new file mode 100644
index 0000000..8df8cf9
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/string.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STRING_H
+#define STRING_H
+
+#include <stddef.h>
+
+void *memcpy(void *dst, const void *src, size_t len);
+void *memmove(void *dst, const void *src, size_t len);
+int memcmp(const void *s1, const void *s2, size_t len);
+int strcmp(const char *s1, const char *s2);
+int strncmp(const char *s1, const char *s2, size_t n);
+void *memchr(const void *src, int c, size_t len);
+char *strchr(const char *s, int c);
+void *memset(void *dst, int val, size_t count);
+size_t strlen(const char *s);
+size_t strnlen(const char *s, size_t maxlen);
+char *strrchr(const char *p, int ch);
+size_t strlcpy(char *dst, const char *src, size_t dsize);
+char *strncpy(char *dst, const char *src, size_t n);
+
+#endif /* STRING_H */
diff --git a/spm/scmi/include/ext/lib/libc/time.h b/spm/scmi/include/ext/lib/libc/time.h
new file mode 100644
index 0000000..c1c95e5
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/time.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef TIME_H
+#define TIME_H
+
+#include <stddef.h>
+
+typedef long int time_t;
+
+#endif /* TIME_H */
diff --git a/spm/scmi/include/ext/lib/libc/uuid.h b/spm/scmi/include/ext/lib/libc/uuid.h
new file mode 100644
index 0000000..3a1699b
--- /dev/null
+++ b/spm/scmi/include/ext/lib/libc/uuid.h
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Portions copyright (c) 2014-2020, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_UUID_H_
+#define _SYS_UUID_H_
+
+#include <cdefs.h>
+#include <stdint.h>
+
+/* Length of a node address (an IEEE 802 address). */
+#define _UUID_NODE_LEN 6
+
+struct uuid {
+ uint8_t time_low[4];
+ uint8_t time_mid[2];
+ uint8_t time_hi_and_version[2];
+ uint8_t clock_seq_hi_and_reserved;
+ uint8_t clock_seq_low;
+ uint8_t node[_UUID_NODE_LEN];
+};
+
+typedef struct uuid uuid_t;
+
+#endif /* _SYS_UUID_H_ */
diff --git a/spm/scmi/include/ext/lib/mmio.h b/spm/scmi/include/ext/lib/mmio.h
new file mode 100644
index 0000000..c788af3
--- /dev/null
+++ b/spm/scmi/include/ext/lib/mmio.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MMIO_H__
+#define __MMIO_H__
+
+#include <stdint.h>
+
+static inline void mmio_write_8(uintptr_t addr, uint8_t value)
+{
+ *(volatile uint8_t*)addr = value;
+}
+
+static inline uint8_t mmio_read_8(uintptr_t addr)
+{
+ return *(volatile uint8_t*)addr;
+}
+
+static inline void mmio_write_32(uintptr_t addr, uint32_t value)
+{
+ *(volatile uint32_t*)addr = value;
+}
+
+static inline void mmio_write32_offset(uintptr_t addr, uint32_t byte_off,
+ uint32_t data)
+{
+ mmio_write_32((uintptr_t)((uint8_t *)addr + byte_off), data);
+}
+
+static inline uint32_t mmio_read_32(uintptr_t addr)
+{
+ return *(volatile uint32_t*)addr;
+}
+
+static inline uint32_t mmio_read32_offset(uintptr_t addr, uint32_t byte_off)
+{
+ return mmio_read_32((uintptr_t)((uint8_t *)addr + byte_off));
+}
+
+static inline void mmio_write_64(uintptr_t addr, uint64_t value)
+{
+ *(volatile uint64_t*)addr = value;
+}
+
+static inline void mmio_write64_offset(uintptr_t addr, uint32_t byte_off,
+ uint64_t data)
+{
+ mmio_write_64((uintptr_t)((uint8_t *)addr + byte_off), data);
+}
+
+static inline uint64_t mmio_read_64(uintptr_t addr)
+{
+ return *(volatile uint64_t*)addr;
+}
+
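+/*
+ * Illustrative read-modify-write sketch (hypothetical register address and
+ * bit position):
+ *
+ *   uint32_t reg = mmio_read_32(0x1C090000);   // read a device register
+ *
+ *   reg |= (1U << 0);                          // set an enable bit
+ *   mmio_write_32(0x1C090000, reg);            // write it back
+ */
+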
+#endif /* __MMIO_H__ */
diff --git a/spm/scmi/include/ext/lib/power_management.h b/spm/scmi/include/ext/lib/power_management.h
new file mode 100644
index 0000000..3d8e961
--- /dev/null
+++ b/spm/scmi/include/ext/lib/power_management.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __POWER_MANAGEMENT_H__
+#define __POWER_MANAGEMENT_H__
+
+#include <platform_def.h>
+#include <psci.h>
+#include <spinlock.h>
+#include <stdint.h>
+
+/* Set of states of an affinity node as seen by the Test Framework */
+typedef enum {
+ TFTF_AFFINITY_STATE_OFF = 0,
+ TFTF_AFFINITY_STATE_ON_PENDING,
+ TFTF_AFFINITY_STATE_ON,
+} tftf_affinity_info_t;
+
+/* Structure for keeping track of CPU state */
+typedef struct {
+ volatile tftf_affinity_info_t state;
+ spinlock_t lock;
+} __aligned(CACHE_WRITEBACK_GRANULE) tftf_cpu_state_t;
+
+/*
+ * Suspend information passed to the TFTF suspend helpers.
+ */
+typedef struct suspend_info {
+ /* The power state parameter to be passed to PSCI_CPU_SUSPEND */
+ unsigned int power_state;
+ /* SMC function ID of the PSCI suspend call */
+ unsigned int psci_api;
+ /* Whether the system context needs to be saved and restored */
+ unsigned int save_system_context;
+} suspend_info_t;
+
+/*
+ * Power up a core.
+ * This uses the PSCI CPU_ON API, which means it relies on the EL3 firmware's
+ * runtime services capabilities.
+ * The core will be bootstrapped by the framework before being handed over
+ * to the entry point specified as the 2nd argument.
+ *
+ * target_cpu: MPID of the CPU to power up
+ * entrypoint: Address where the CPU will jump once the framework has
+ * initialized it
+ * context_id: Context identifier as defined by the PSCI specification
+ *
+ * Return: Return code of the PSCI CPU_ON call
+ * (refer to the PSCI specification for details)
+ */
+int32_t tftf_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id);
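+
+/*
+ * Illustrative call sequence (the entry point symbol is hypothetical):
+ *
+ *   extern void secondary_entry(void);
+ *   int32_t ret = tftf_cpu_on(target_mpid, (uintptr_t)secondary_entry, 0);
+ *   if (ret != PSCI_E_SUCCESS) { ... report the error ... }
+ */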
+
+/*
+ * Tries to power up a core.
+ * This API is similar to tftf_cpu_on(), the difference being that it
+ * makes an SMC call to the EL3 firmware without checking the status of
+ * the core with respect to the framework.
+ *
+ * A caller is expected to handle the return code given by the EL3 firmware.
+ *
+ * target_cpu: MPID of the CPU to power up
+ * entrypoint: Address where the CPU will jump once the framework has
+ * initialised it
+ * context_id: Context identifier as defined by the PSCI specification
+ *
+ * Return: Return code of the PSCI CPU_ON call
+ * (refer to the PSCI specification for details)
+ */
+int32_t tftf_try_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id);
+
+/*
+ * Power down the calling core.
+ * This uses the PSCI CPU_OFF API, which means it relies on the EL3 firmware's
+ * runtime services capabilities.
+ *
+ * Return: This function does not return when successful.
+ *         Otherwise, it returns the same error code as the PSCI CPU_OFF call
+ *         (refer to the PSCI specification for details)
+ */
+int32_t tftf_cpu_off(void);
+
+/*
+ * This API is used to enter a suspend state. It does the following:
+ * - Allocates space on the stack for saving the architectural and
+ *   non-architectural CPU state
+ * - Saves the architectural state of the CPU in the allocated space, which
+ *   consists of:
+ *    a. Callee-saved registers
+ *    b. System control registers, e.g. the MMU setup, SCTLR_EL1
+ * - Depending on the `save_system_context` flag in suspend_info, saves the
+ *   context of system peripherals such as the GIC and timers
+ * - Sets the context ID to the base of the stack allocated for saving context
+ * - Calls the Secure Platform Firmware to enter suspend
+ * - If the suspend fails, restores the callee-saved registers
+ *
+ * info: Suspend information, including the PSCI power state to be sent via SMC
+ * Returns: PSCI_E_SUCCESS or PSCI_E_INVALID_PARAMS
+ *
+ * Note: This API might not test all use cases, as the context ID and resume
+ * entry point are under the control of the framework.
+ */
+int tftf_suspend(const suspend_info_t *info);
+
+
+/* ----------------------------------------------------------------------------
+ * The above APIs might not be suitable in all test scenarios.
+ * A test case might want to bypass those APIs, i.e. call the PSCI APIs
+ * directly. In this case, it is the responsibility of the test case to
+ * preserve the state of the framework. The APIs below are provided to this
+ * end.
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * The 3 following functions are used to manipulate the reference count tracking
+ * the number of CPUs participating in a test.
+ */
+
+/*
+ * Increment the reference count.
+ * Return the new, incremented value.
+ */
+unsigned int tftf_inc_ref_cnt(void);
+
+/*
+ * Decrement the reference count.
+ * Return the new, decremented value.
+ */
+unsigned int tftf_dec_ref_cnt(void);
+
+/* Return the current reference count value */
+unsigned int tftf_get_ref_cnt(void);
+
+/*
+ * Set the calling CPU online/offline. This only adjusts the framework's view
+ * of the core; it doesn't actually power the core up or down.
+ */
+void tftf_set_cpu_online(void);
+void tftf_init_cpus_status_map(void);
+void tftf_set_cpu_offline(void);
+
+/*
+ * Query the state of a core.
+ * Return: 1 if the core is online, 0 otherwise.
+ */
+unsigned int tftf_is_cpu_online(unsigned int mpid);
+
+unsigned int tftf_is_core_pos_online(unsigned int core_pos);
+
+/* TFTF Suspend helpers */
+static inline int tftf_cpu_suspend(unsigned int pwr_state)
+{
+ suspend_info_t info = {
+ .power_state = pwr_state,
+ .save_system_context = 0,
+ .psci_api = SMC_PSCI_CPU_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+static inline int tftf_cpu_suspend_save_sys_ctx(unsigned int pwr_state)
+{
+ suspend_info_t info = {
+ .power_state = pwr_state,
+ .save_system_context = 1,
+ .psci_api = SMC_PSCI_CPU_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+
+static inline int tftf_system_suspend(void)
+{
+ suspend_info_t info = {
+ .power_state = 0,
+ .save_system_context = 1,
+ .psci_api = SMC_PSCI_SYSTEM_SUSPEND,
+ };
+
+ return tftf_suspend(&info);
+}
+
+#endif /* __POWER_MANAGEMENT_H__ */
diff --git a/spm/scmi/include/ext/lib/sgi.h b/spm/scmi/include/ext/lib/sgi.h
new file mode 100644
index 0000000..d2f4b37
--- /dev/null
+++ b/spm/scmi/include/ext/lib/sgi.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SGI_H__
+#define __SGI_H__
+
+/* Data associated with the reception of an SGI */
+typedef struct {
+ /* Interrupt ID of the signaled interrupt */
+ unsigned int irq_id;
+} sgi_data_t;
+
+/*
+ * Send an SGI to a given core.
+ */
+void tftf_send_sgi(unsigned int sgi_id, unsigned int core_pos);
+
+#endif /* __SGI_H__ */
diff --git a/spm/scmi/include/ext/lib/spinlock.h b/spm/scmi/include/ext/lib/spinlock.h
new file mode 100644
index 0000000..27ea730
--- /dev/null
+++ b/spm/scmi/include/ext/lib/spinlock.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPINLOCK_H__
+#define __SPINLOCK_H__
+
+typedef struct spinlock {
+ volatile unsigned int lock;
+} spinlock_t;
+
+void init_spinlock(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
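+
+/*
+ * Typical usage sketch:
+ *
+ *   static spinlock_t lock;
+ *
+ *   init_spinlock(&lock);
+ *   spin_lock(&lock);
+ *   ... critical section ...
+ *   spin_unlock(&lock);
+ */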
+
+#endif /* __SPINLOCK_H__ */
diff --git a/spm/scmi/include/ext/lib/status.h b/spm/scmi/include/ext/lib/status.h
new file mode 100644
index 0000000..8cb6145
--- /dev/null
+++ b/spm/scmi/include/ext/lib/status.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __STATUS_H__
+#define __STATUS_H__
+
+/* Status Code definitions */
+#define STATUS_SUCCESS 0x00
+#define STATUS_INVALID_PARAMETER 0x01
+#define STATUS_UNSUPPORTED 0x02
+#define STATUS_OUT_OF_RESOURCES 0x03
+#define STATUS_NOT_FOUND 0x04
+#define STATUS_ABORTED 0x05
+#define STATUS_LOAD_ERROR 0x06
+#define STATUS_NEVER_RETURN 0x07
+#define STATUS_BUSY 0x08
+#define STATUS_NOT_INIT 0x09
+#define STATUS_BUFFER_TOO_SMALL 0x0A
+#define STATUS_COMPROMISED_DATA 0x0B
+#define STATUS_ALREADY_LOADED 0x0C
+#define STATUS_FAIL 0x0D
+
+typedef unsigned int STATUS;
+
+#endif /* __STATUS_H__ */
diff --git a/spm/scmi/include/ext/lib/tftf_lib.h b/spm/scmi/include/ext/lib/tftf_lib.h
new file mode 100644
index 0000000..d265bb9
--- /dev/null
+++ b/spm/scmi/include/ext/lib/tftf_lib.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TFTF_LIB_H__
+#define __TFTF_LIB_H__
+
+#ifndef __ASSEMBLY__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/*
+ * Possible result codes for signaling the outcome of a test.
+ * TEST_RESULT_MIN and TEST_RESULT_MAX are only used as bounds in the enum.
+ */
+typedef enum {
+ /*
+ * NA = Not applicable.
+ * Initial value for a test result.
+ * Used for CPUs that don't participate in the test.
+ */
+ TEST_RESULT_NA = -1,
+
+ TEST_RESULT_MIN = 0,
+ TEST_RESULT_SKIPPED = TEST_RESULT_MIN,
+ TEST_RESULT_SUCCESS,
+ TEST_RESULT_FAIL,
+ TEST_RESULT_CRASHED,
+
+ TEST_RESULT_MAX
+} test_result_t;
+
+#define TEST_RESULT_IS_VALID(result) \
+ ((result >= TEST_RESULT_MIN) && (result < TEST_RESULT_MAX))
+
+/*
+ * PSCI Function Wrappers
+ *
+ * SMC calls to PSCI functions
+ */
+int32_t tftf_psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entry_point_address,
+ u_register_t context_id);
+int32_t tftf_psci_cpu_off(void);
+int32_t tftf_psci_set_suspend_mode(uint32_t mode);
+int32_t tftf_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level);
+int32_t tftf_psci_node_hw_state(u_register_t target_cpu, uint32_t power_level);
+int32_t tftf_get_psci_feature_info(uint32_t psci_func_id);
+u_register_t tftf_psci_stat_count(u_register_t target_cpu,
+ uint32_t power_state);
+u_register_t tftf_psci_stat_residency(u_register_t target_cpu,
+ uint32_t power_state);
+
+/*
+ * PSCI Helper functions
+ */
+
+/*
+ * Gets the context ID used when calling tftf_psci_cpu_on().
+ */
+u_register_t tftf_get_cpu_on_ctx_id(unsigned int core_pos);
+
+/*
+ * Sets the context ID used when calling tftf_psci_cpu_on().
+ */
+void tftf_set_cpu_on_ctx_id(unsigned int core_pos, u_register_t context_id);
+
+/*
+ * Gets the PSCI version of Trusted Firmware-A. The version number returned
+ * is a 32-bit unsigned integer, with the upper 16 bits denoting the major
+ * revision, and the lower 16 bits denoting the minor revision.
+ */
+unsigned int tftf_get_psci_version(void);
+
+/*
+ * Returns 0 if version is not a valid PSCI version supported by TFTF.
+ * Otherwise it returns a non-zero value.
+ */
+int tftf_is_valid_psci_version(unsigned int version);
+
+
+/*
+ * The function constructs a composite state_id up to the specified
+ * affinity level, querying the relevant state property from the platform.
+ * It chooses the first matching state property from the array returned
+ * by platform. In case the requested affinity level is not supported by
+ * the platform, then this function uses DUMMY_STATE_ID as the local state
+ * for that level. This allows the tests to construct composite state-id
+ * for invalid affinity levels as well. It returns the expected return
+ * value from CPU SUSPEND call.
+ */
+int tftf_psci_make_composite_state_id(uint32_t affinity_level,
+ uint32_t state_type, uint32_t *state_id);
+
+/*
+ * This function composes the power state parameter in the right format
+ * needed by PSCI. The detection of the power state format is done during
+ * cold boot by tftf_detect_psci_pstate_format() function.
+ */
+uint32_t tftf_make_psci_pstate(uint32_t affinity_level,
+ uint32_t state_type,
+ uint32_t state_id);
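+
+/*
+ * For example, a core power-down state for affinity level 0 could be
+ * composed as follows (the constants are assumed to come from the
+ * framework's PSCI definitions, and state_id from
+ * tftf_psci_make_composite_state_id()):
+ *
+ *   uint32_t pstate = tftf_make_psci_pstate(MPIDR_AFFLVL0,
+ *                                           PSTATE_TYPE_POWERDOWN,
+ *                                           state_id);
+ */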
+
+/*
+ * Returns 1 if the EL3 software supports PSCI's original state format with a
+ * null state ID, zero otherwise.
+ */
+unsigned int tftf_is_psci_state_id_null(void);
+
+/*
+ * Returns 1 if the EL3 software supports PSCI's original state format,
+ * zero otherwise.
+ */
+unsigned int tftf_is_psci_pstate_format_original(void);
+
+/* Functions to wait for a specified number of ms or us */
+void waitms(uint64_t ms);
+void waitus(uint64_t us);
+
+/*
+ * SMC calls take a function identifier and up to 7 arguments.
+ * Additionally, a few SMC calls that originate from EL2 use the seventh
+ * argument explicitly. Given that TFTF runs in EL2, we need to be able to
+ * specify it.
+ */
+typedef struct {
+ /* Function identifier. Identifies which function is being invoked. */
+ uint32_t fid;
+
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+} smc_args;
+
+/* SMC calls can return up to 8 register values */
+typedef struct {
+ u_register_t ret0;
+ u_register_t ret1;
+ u_register_t ret2;
+ u_register_t ret3;
+ u_register_t ret4;
+ u_register_t ret5;
+ u_register_t ret6;
+ u_register_t ret7;
+} smc_ret_values;
+
+/*
+ * Trigger an SMC call.
+ */
+smc_ret_values tftf_smc(const smc_args *args);
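+
+/*
+ * For example, querying the PSCI version over SMC (illustrative; assumes
+ * SMC_PSCI_VERSION is provided by the framework's PSCI definitions):
+ *
+ *   smc_args args = { .fid = SMC_PSCI_VERSION };
+ *   smc_ret_values ret = tftf_smc(&args);
+ *   unsigned int version = (unsigned int)ret.ret0;
+ */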
+
+/*
+ * Trigger an HVC call.
+ */
+typedef smc_args hvc_args;
+
+typedef smc_ret_values hvc_ret_values;
+
+hvc_ret_values tftf_hvc(const hvc_args *args);
+
+/*
+ * Write a formatted string in the test output buffer.
+ * Just like the standard libc's printf() function, the string produced is under
+ * the control of a format string that specifies how subsequent arguments are
+ * converted.
+ *
+ * The string will appear in the test report.
+ * Use mp_printf() instead for transient debug messages that are not meant to
+ * be stored in the test report.
+ * Note: The test output buffer referred to here is a temporary buffer stored
+ * in RAM. This function doesn't write anything into NVM.
+ *
+ * On success, it returns the number of characters printed (not including the
+ * final '\0' character). If an output error is encountered, a negative value
+ * is returned. If the function is not able to print any character at all,
+ * this is considered an output error. Note that a partial write (i.e. when
+ * the string is truncated) is not considered an output error.
+ */
+__attribute__((format(printf, 1, 2)))
+int tftf_testcase_printf(const char *format, ...);
+
+/*
+ * This function is meant to be used by tests.
+ * It tells the framework that the test is going to reset the platform.
+ *
+ * If the test omits to call this function before resetting, the framework
+ * will consider that the test crashed upon resumption.
+ */
+void tftf_notify_reboot(void);
+
+/*
+ * Returns 0 if the test function is executed for the first time,
+ * or 1 if the test rebooted the platform and the test function is being
+ * executed again.
+ * This function is used for tests that reboot the platform, so that they can
+ * execute different code paths on 1st execution and subsequent executions.
+ */
+unsigned int tftf_is_rebooted(void);
+
+static inline unsigned int make_mpid(unsigned int clusterid,
+#if PLAT_MAX_PE_PER_CPU > 1
+ unsigned int coreid,
+ unsigned int threadid)
+#else
+ unsigned int coreid)
+#endif
+{
+	/*
+	 * If the MT bit is set, the affinity fields need to be shifted up by
+	 * one level and the MT bit set in the resulting MPID.
+	 */
+ if ((read_mpidr_el1() & MPIDR_MT_MASK) != 0)
+ return MPIDR_MT_MASK |
+#if PLAT_MAX_PE_PER_CPU > 1
+ ((threadid & MPIDR_AFFLVL_MASK) << MPIDR_AFF0_SHIFT) |
+#endif
+ ((coreid & MPIDR_AFFLVL_MASK) << MPIDR_AFF1_SHIFT) |
+ ((clusterid & MPIDR_AFFLVL_MASK) << MPIDR_AFF2_SHIFT);
+ else
+ return ((coreid & MPIDR_AFFLVL_MASK) << MPIDR_AFF0_SHIFT) |
+ ((clusterid & MPIDR_AFFLVL_MASK) << MPIDR_AFF1_SHIFT);
+
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __TFTF_LIB_H__ */
diff --git a/spm/scmi/include/ext/lib/timer.h b/spm/scmi/include/ext/lib/timer.h
new file mode 100644
index 0000000..0bfff01
--- /dev/null
+++ b/spm/scmi/include/ext/lib/timer.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TIMER_H__
+#define __TIMER_H__
+
+#include <irq.h>
+
+typedef struct plat_timer {
+ int (*program)(unsigned long time_out_ms);
+ int (*cancel)(void);
+ int (*handler)(void);
+
+ /*
+ * Duration of the atomic time slice in milliseconds. All timer
+ * requests within the same time slice are merged into one. This value
+ * should be chosen such that it is greater than the time required to
+ * program the timer.
+ */
+ unsigned int timer_step_value;
+ unsigned int timer_irq;
+} plat_timer_t;
+
+/*
+ * Gets the platform specific timer implementation information and initialises
+ * the timer framework and peripheral.
+ * Returns 0 on success, or the return value of the timer peripheral
+ * initialisation function otherwise.
+ */
+int tftf_initialise_timer(void);
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs.
+ * The interrupt is sent to the core calling this API. The actual time at
+ * which the interrupt is received by the core can be greater than the
+ * requested time.
+ * Returns 0 on success and -1 on failure.
+ */
+int tftf_program_timer(unsigned long milli_secs);
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs and to
+ * suspend the CPU to the desired power state. The interrupt is sent to the
+ * core calling this API. The actual time at which the interrupt is received
+ * by the core can be greater than the requested time.
+ *
+ * The return codes of the tftf_program_timer and tftf_cpu_suspend calls are
+ * stored in the timer_rc and suspend_rc output parameters respectively.
+ * If a function is not executed, the value stored in the corresponding output
+ * parameter will be as if the call had succeeded. NULL pointers are accepted
+ * to discard the return codes.
+ * Returns 0 on success and -1 on failure.
+ */
+
+int tftf_program_timer_and_suspend(unsigned long milli_secs,
+ unsigned int pwr_state,
+ int *timer_rc, int *suspend_rc);
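+
+/*
+ * Illustrative usage, discarding the individual return codes (pwr_state as
+ * composed by tftf_make_psci_pstate()):
+ *
+ *   int ret = tftf_program_timer_and_suspend(10, pwr_state, NULL, NULL);
+ */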
+
+/*
+ * Requests the timer framework to send an interrupt after milli_secs and to
+ * suspend the system. The interrupt is sent to the core calling this API.
+ * The actual time at which the interrupt is received by the core can be
+ * greater than the requested time. For the system suspend to succeed, all
+ * cores other than the calling core should be in the OFF state.
+ *
+ * The return codes of the tftf_program_timer and system suspend calls are
+ * stored in the timer_rc and suspend_rc output parameters respectively.
+ * If a function is not executed, the value stored in the corresponding output
+ * parameter will be as if the call had succeeded. NULL pointers are accepted
+ * to discard the return codes.
+ * Returns 0 on success and -1 on failure.
+ */
+int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
+ int *timer_rc, int *suspend_rc);
+
+/*
+ * Suspends the calling CPU for specified milliseconds.
+ *
+ * Returns 0 on success, and -1 otherwise.
+ */
+int tftf_timer_sleep(unsigned long milli_secs);
+
+/*
+ * Common handler for servicing all the timer interrupts. It in turn calls the
+ * peripheral specific handler. It also sends WAKE_SGI to all the cores which
+ * requested an interrupt within a time frame of timer_step_value.
+ * Also, if there are pending interrupt requests, reprograms the timer
+ * accordingly to fire an interrupt at the right time.
+ *
+ * Returns 0 on success.
+ */
+int tftf_timer_framework_handler(void *data);
+
+/*
+ * Cancels the timer request previously programmed by the calling core.
+ * This API should only be used by a core to cancel its own interrupt
+ * request.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_cancel_timer(void);
+
+/*
+ * Registers a handler to be called when a timer interrupt fires.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_timer_register_handler(irq_handler_t irq_handler);
+
+/*
+ * Unregisters a previously registered timer interrupt handler.
+ * Returns 0 on success, negative value otherwise.
+ */
+int tftf_timer_unregister_handler(void);
+
+/*
+ * Returns the IRQ number of the registered timer interrupt.
+ */
+unsigned int tftf_get_timer_irq(void);
+
+/*
+ * Returns the platform's timer step value, for use by test cases.
+ */
+unsigned int tftf_get_timer_step_value(void);
+
+/*
+ * Restore the GIC state after wake-up from system suspend
+ */
+void tftf_timer_gic_state_restore(void);
+
+#endif /* __TIMER_H__ */
diff --git a/spm/scmi/include/ext/lib/utils/math_utils.h b/spm/scmi/include/ext/lib/utils/math_utils.h
new file mode 100644
index 0000000..9d8e88d
--- /dev/null
+++ b/spm/scmi/include/ext/lib/utils/math_utils.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MATH_UTILS_H__
+#define __MATH_UTILS_H__
+
+#include <stdint.h>
+
+/* Simple utility to raise a `base` number to the given `power`. */
+static inline unsigned int pow(unsigned int base, unsigned int power)
+{
+ unsigned int result = 1;
+ while (power) {
+ result *= base;
+ power--;
+ }
+ return result;
+}
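+
+/* For example, pow(2, 10) evaluates to 1024. */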
+
+#endif /* __MATH_UTILS_H__ */
diff --git a/spm/scmi/include/ext/lib/utils/uuid_utils.h b/spm/scmi/include/ext/lib/utils/uuid_utils.h
new file mode 100644
index 0000000..dda8241
--- /dev/null
+++ b/spm/scmi/include/ext/lib/utils/uuid_utils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __UUID_UTILS_H__
+#define __UUID_UTILS_H__
+
+#include <stdint.h>
+#include <uuid.h>
+
+/* Size (in bytes) of a UUID string as formatted by uuid_to_str() */
+#define UUID_STR_SIZE 79
+
+/*
+ * Convert a UUID into a string.
+ *
+ * The caller is responsible for allocating the output string buffer
+ * pointed to by 'str'. It must be at least UUID_STR_SIZE bytes long.
+ *
+ * Return the UUID string.
+ */
+char *uuid_to_str(const uuid_t *uuid, char *str);
+
+/*
+ * Return 1 if uuid == uuid_null, 0 otherwise.
+ */
+unsigned int is_uuid_null(const uuid_t *uuid);
+
+/*
+ * Return 1 if uuid1 == uuid2, 0 otherwise.
+ */
+unsigned int uuid_equal(const uuid_t *uuid1, const uuid_t *uuid2);
+
+/*
+ * Take four 32-bit words of data and combine them into a UUID.
+ *
+ * The caller is responsible for allocating the output UUID variable
+ * pointed to by 'uuid'.
+ *
+ * Return the UUID.
+ */
+uuid_t *make_uuid_from_4words(uuid_t *uuid,
+ uint32_t uuid0,
+ uint32_t uuid1,
+ uint32_t uuid2,
+ uint32_t uuid3);
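+
+/*
+ * Illustrative usage (the word values are arbitrary):
+ *
+ *   uuid_t uuid;
+ *   make_uuid_from_4words(&uuid, 0x12345678, 0x9abcdef0,
+ *                         0x13579bdf, 0x2468ace0);
+ */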
+
+#endif /* __UUID_UTILS_H__ */
diff --git a/spm/scmi/include/ext/lib/utils_def.h b/spm/scmi/include/ext/lib/utils_def.h
new file mode 100644
index 0000000..0013d19
--- /dev/null
+++ b/spm/scmi/include/ext/lib/utils_def.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTILS_DEF_H
+#define UTILS_DEF_H
+
+/* Compute the number of elements in the given array */
+#define ARRAY_SIZE(a) \
+ (sizeof(a) / sizeof((a)[0]))
+
+#define IS_POWER_OF_TWO(x) \
+ (((x) & ((x) - 1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n) (4 << (n))
+
+#define BIT_32(nr) (U(1) << (nr))
+#define BIT_64(nr) (ULL(1) << (nr))
+
+#ifndef __aarch64__
+#define BIT BIT_32
+#else
+#define BIT BIT_64
+#endif
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example,
+ * GENMASK_64(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+#define GENMASK_32(h, l) \
+ (((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+ ((~0 << (l)) & (~0 >> (64 - 1 - (h))))
+#else
+#define GENMASK_32(h, l) \
+ (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+ (((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
+#endif
+
+#ifndef __aarch64__
+#define GENMASK GENMASK_32
+#else
+#define GENMASK GENMASK_64
+#endif
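+
+/* For example, GENMASK_32(7, 4) evaluates to 0x000000F0. */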
+
+/*
+ * This variant of div_round_up can be used in macro definitions but should
+ * not be used in C code, as the `d` parameter is evaluated twice.
+ */
+#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d) - 1) / (d))
+
+#define div_round_up(val, div) __extension__ ({ \
+ __typeof__(div) _div = (div); \
+ ((val) + _div - (__typeof__(div)) 1) / _div; \
+})
+
+#define MIN(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x < _y ? _x : _y; \
+})
+
+#define MAX(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ _x > _y ? _x : _y; \
+})
+
+/*
+ * The round_up() macro rounds up a value to the given boundary in a
+ * type-agnostic yet type-safe manner. The boundary must be a power of two.
+ * In other words, it computes the smallest multiple of boundary which is
+ * greater than or equal to value.
+ *
+ * round_down() is similar but rounds the value down instead.
+ */
+#define round_boundary(value, boundary) \
+ ((__typeof__(value))((boundary) - 1))
+
+#define round_up(value, boundary) \
+ ((((value) - 1) | round_boundary(value, boundary)) + 1)
+
+#define round_down(value, boundary) \
+ ((value) & ~round_boundary(value, boundary))
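+
+/*
+ * For example, round_up(0x1001, 0x1000) evaluates to 0x2000 and
+ * round_down(0x1FFF, 0x1000) to 0x1000.
+ */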
+
+/*
+ * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
+ * Both arguments must be unsigned pointer values (i.e. uintptr_t).
+ */
+#define check_uptr_overflow(_ptr, _inc) \
+ ((_ptr) > (UINTPTR_MAX - (_inc)))
+
+/*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(_u32, _inc) \
+ ((_u32) > (UINT32_MAX - (_inc)))
+
+/*
+ * For those constants to be shared between C and other sources, apply a 'U',
+ * 'UL', 'ULL', 'L' or 'LL' suffix to the argument only in C, to avoid
+ * undefined or unintended behaviour.
+ *
+ * The GNU assembler and linker do not support these suffixes (it causes the
+ * build process to fail) therefore the suffix is omitted when used in linker
+ * scripts and assembler files.
+ */
+#if defined(__LINKER__) || defined(__ASSEMBLY__)
+# define U(_x) (_x)
+# define UL(_x) (_x)
+# define ULL(_x) (_x)
+# define L(_x) (_x)
+# define LL(_x) (_x)
+#else
+# define U(_x) (_x##U)
+# define UL(_x) (_x##UL)
+# define ULL(_x) (_x##ULL)
+# define L(_x) (_x##L)
+# define LL(_x) (_x##LL)
+#endif
+
+/* Register size of the current architecture. */
+#ifndef __aarch64__
+#define REGSZ U(4)
+#else
+#define REGSZ U(8)
+#endif
+
+/*
+ * Test for the current architecture version to be at least the version
+ * expected.
+ */
+#define ARM_ARCH_AT_LEAST(_maj, _min) \
+ ((ARM_ARCH_MAJOR > (_maj)) || \
+ ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
+
+/*
+ * Import an assembly or linker symbol as a C expression with the specified
+ * type.
+ */
+#define IMPORT_SYM(type, sym, name) \
+ extern char sym[];\
+ static const __attribute__((unused)) type name = (type) sym;
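+
+/*
+ * For example, importing a linker-defined symbol (the symbol name below is
+ * illustrative):
+ *
+ *   IMPORT_SYM(uintptr_t, __TEXT_START__, text_start);
+ */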
+
+/*
+ * When the symbol is used to hold a pointer, its alignment can be asserted
+ * with this macro. For example, if there is a linker symbol that is going to
+ * be used as a 64-bit pointer, the value of the linker symbol must also be
+ * aligned to 64 bit. This macro makes sure this is the case.
+ */
+#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
+
+#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
+
+#define INPLACE(regfield, val) \
+ (((val) + UL(0)) << (regfield##_SHIFT))
+
+#define MASK(regfield) \
+ ((~0ULL >> (64ULL - (regfield##_WIDTH))) << (regfield##_SHIFT))
+
+#define EXTRACT(regfield, reg) \
+ (((reg) & MASK(regfield)) >> (regfield##_SHIFT))
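+
+/*
+ * For example, for a hypothetical register field FOO with FOO_SHIFT 4 and
+ * FOO_WIDTH 2:
+ *
+ *   MASK(FOO)          evaluates to 0x30
+ *   INPLACE(FOO, 2)    evaluates to 0x20
+ *   EXTRACT(FOO, 0x20) evaluates to 2
+ */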
+
+/*
+ * Defines member of structure and reserves space
+ * for the next member with specified offset.
+ */
+#define SET_MEMBER(member, start, end) \
+ union { \
+ member; \
+ unsigned char reserved##end[end - start]; \
+ }
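+
+/*
+ * For example, SET_MEMBER(uint64_t regs[4], 0x0, 0x100) declares 'regs' at
+ * offset 0x0 and reserves space so that the next member starts at offset
+ * 0x100.
+ */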
+
+#define CONCAT(x, y) x##y
+#define CONC(x, y) CONCAT(x, y)
+
+#endif /* UTILS_DEF_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/spm/scmi/include/ext/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
new file mode 100644
index 0000000..4afd1c8
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE != PAGE_SIZE_4KB
+#error "Invalid granule size. AArch32 supports 4KB pages only."
+#endif
+
+#define MIN_LVL_BLOCK_DESC U(1)
+
+#define XLAT_TABLE_LEVEL_MIN U(1)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch32
+ * state.
+ *
+ * TTBCR.TxSZ is calculated as 32 minus the width of said address space. The
+ * value of TTBCR.TxSZ must be in the range 0 to 7 [1], which means that the
+ * virtual address space width must be in the range 32 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 1 supports virtual address spaces of widths 32 to 31 bits;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as the initial lookup level with 4 KB granularity.
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ *
+ * For example, for a 31-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? \
+ U(1) : U(2))
+
+#endif /* XLAT_TABLES_AARCH32_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/spm/scmi/include/ext/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
new file mode 100644
index 0000000..8c29d00
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * Encode a Physical Address Space size for its use in TCR_ELx.
+ */
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4KB
+# define MIN_LVL_BLOCK_DESC U(1)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
+# define MIN_LVL_BLOCK_DESC U(2)
+#endif
+
+#define XLAT_TABLE_LEVEL_MIN U(0)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch64
+ * state.
+ *
+ * TCR.TxSZ is calculated as 64 minus the width of said address space.
+ * The value of TCR.TxSZ must be in the range 16 to 39 [1], or 16 to 48 [2]
+ * when Small Translation Table support is implemented. This means that the
+ * virtual address space width must be in the range 48 to 25 bits, or down to
+ * 16 bits with that support.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * Page 1730: 'Input address size', 'For all translation stages'.
+ * [2] See section 12.2.55 in the ARMv8-A Architecture Reference Manual
+ * (DDI 0487D.a)
+ */
+/* Maximum value of TCR_ELx.T(0,1)SZ is 39 */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MAX))
+
+/* Maximum value of TCR_ELx.T(0,1)SZ is 48 */
+#define MIN_VIRT_ADDR_SPACE_SIZE_TTST \
+ (ULL(1) << (U(64) - TCR_TxSZ_MAX_TTST))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 0 supports virtual address spaces of widths 48 to 40 bits;
+ * - level 1 from 39 to 31;
+ * - level 2 from 30 to 22;
+ * - level 3 from 21 to 16.
+ *
+ * Small Translation Table (Armv8.4-TTST) support allows the translation
+ * table walk to start at level 3 for the 4KB granularity. See section
+ * 12.2.55 in the ARMv8-A Architecture Reference Manual (DDI 0487D.a). In
+ * Armv8.3 and below, wider or narrower address spaces are not supported.
+ * As a result, level 3 cannot be used as the initial lookup level with the
+ * 4 KB granularity. See section
+ * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ *
+ * For example, for a 35-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
+ ? 0U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
+ ? 1U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L2_XLAT_ADDRESS_SHIFT)) \
+ ? 2U : 3U)))
+
+#endif /* XLAT_TABLES_AARCH64_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/xlat_mmu_helpers.h b/spm/scmi/include/ext/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 0000000..15ab2ef
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H
+
+/*
+ * The following flags are passed to enable_mmu_xxx() to override the default
+ * values used to program system registers while enabling the MMU.
+ */
+
+/*
+ * When this flag is used, all data access to Normal memory from this EL and all
+ * Normal memory accesses to the translation tables of this EL are non-cacheable
+ * for all levels of data and unified cache until the caches are enabled by
+ * setting the bit SCTLR_ELx.C.
+ */
+#define DISABLE_DCACHE (U(1) << 0)
+
+/*
+ * Mark the translation tables as non-cacheable for the MMU table walker, which
+ * is a different observer from the PE/CPU. If the flag is not specified, the
+ * tables are cacheable for the MMU table walker.
+ *
+ * Note that, as far as the PE/CPU observer is concerned, the attributes used
+ * are the ones specified in the translation tables themselves. The MAIR
+ * register specifies the cacheability through the field AttrIndx of the lower
+ * attributes of the translation tables. The shareability is specified in the SH
+ * field of the lower attributes.
+ *
+ * The MMU table walker uses the attributes specified in the fields ORGNn, IRGNn
+ * and SHn of the TCR register to access the translation tables.
+ *
+ * The attributes specified in the TCR register and the tables can be different
+ * as there are no checks to prevent that. Special care must be taken to ensure
+ * that there aren't mismatches. The behaviour in that case is described in the
+ * sections 'Mismatched memory attributes' in the ARMv8 ARM.
+ */
+#define XLAT_TABLE_NC (U(1) << 1)
+
+/*
+ * Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR 0
+#define MMU_CFG_TCR 1
+#define MMU_CFG_TTBR0 2
+#define MMU_CFG_PARAM_MAX 3
+
+#ifndef __ASSEMBLY__
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to array of size
+ * MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime);
+
+#ifndef __aarch64__
+/* AArch32 specific translation table API */
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
+#else
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
+#endif /* !__aarch64__ */
+
+bool xlat_arch_is_granule_size_supported(size_t size);
+size_t xlat_arch_get_max_supported_granule_size(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* XLAT_MMU_HELPERS_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_arch.h b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_arch.h
new file mode 100644
index 0000000..c8f29e4
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_arch.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H
+
+#ifndef __aarch64__
+#include "aarch32/xlat_tables_aarch32.h"
+#else
+#include "aarch64/xlat_tables_aarch64.h"
+#endif
+
+/*
+ * Evaluates to 1 if the given physical address space size is a power of 2,
+ * or 0 if it's not.
+ */
+#define CHECK_PHY_ADDR_SPACE_SIZE(size) \
+ (IS_POWER_OF_TWO(size))
+
+/*
+ * Compute the number of entries required at the initial lookup level to address
+ * the whole virtual address space.
+ */
+#define GET_NUM_BASE_LEVEL_ENTRIES(addr_space_size) \
+ ((addr_space_size) >> \
+ XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
+
+#endif /* XLAT_TABLES_ARCH_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_defs.h b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 0000000..ba0559c
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H
+
+#include <arch.h>
+#include <utils_def.h>
+#include <xlat_mmu_helpers.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB (U(1) << 9)
+#define NUM_4K_IN_2MB (U(1) << 9)
+#define NUM_GB_IN_4GB (U(1) << 2)
+
+#define TWO_MB_SHIFT U(21)
+#define ONE_GB_SHIFT U(30)
+#define FOUR_KB_SHIFT U(12)
+
+#define ONE_GB_INDEX(x) ((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
+
+#define PAGE_SIZE_4KB U(4096)
+#define PAGE_SIZE_16KB U(16384)
+#define PAGE_SIZE_64KB U(65536)
+
+#define INVALID_DESC U(0x0)
+/*
+ * A block descriptor points to a region of memory bigger than the granule size
+ * (e.g. a 2MB region when the granule size is 4KB).
+ */
+#define BLOCK_DESC U(0x1) /* Table levels 0-2 */
+/* A table descriptor points to the next level of translation table. */
+#define TABLE_DESC U(0x3) /* Table levels 0-2 */
+/*
+ * A page descriptor points to a page, i.e. a memory region whose size is the
+ * translation granule size (e.g. 4KB).
+ */
+#define PAGE_DESC U(0x3) /* Table level 3 */
+
+#define DESC_MASK U(0x3)
+
+#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
+
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
+#define XN (ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN (ULL(1) << 2)
+#define PXN (ULL(1) << 1)
+#define CONT_HINT (ULL(1) << 0)
+#define UPPER_ATTRS(x) (((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL (U(1) << 9)
+#define ACCESS_FLAG (U(1) << 8)
+#define NSH (U(0x0) << 6)
+#define OSH (U(0x2) << 6)
+#define ISH (U(0x3) << 6)
+
+#ifdef __aarch64__
+/* Guarded Page bit */
+#define GP (ULL(1) << 50)
+#endif
+
+#define TABLE_ADDR_MASK ULL(0x0000FFFFFFFFF000)
+
+/*
+ * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
+ * 64KB. However, only 4KB is supported at the moment.
+ */
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - U(1))
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == U(0))
+
+#if (ARM_ARCH_MAJOR == 7) && !ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING
+#define XLAT_ENTRY_SIZE_SHIFT U(2) /* Each MMU table entry is 4 bytes */
+#else
+#define XLAT_ENTRY_SIZE_SHIFT U(3) /* Each MMU table entry is 8 bytes */
+#endif
+#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE (U(1) << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_LEVEL_MAX U(3)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - U(1))
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT (L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
+ ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level) (UL(1) << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - UL(1))
+/* Mask to get the address bits common to a block of a certain table level */
+#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
+/*
+ * Extract from the given virtual address the index into the given lookup level.
+ * This macro assumes the system is using the 4KB translation granule.
+ */
+#define XLAT_TABLE_IDX(virtual_addr, level) \
+ (((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
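+
+/*
+ * For example, XLAT_TABLE_IDX(0x40201000, 3) evaluates to 1, since bits
+ * [20:12] of the address select the level 3 table entry.
+ */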
+
+/*
+ * The ARMv8 translation table descriptor format defines AP[2:1] as the Access
+ * Permissions bits, and does not define an AP[0] bit.
+ *
+ * AP[1] is valid only for a stage 1 translation that supports two VA ranges
+ * (i.e. in the Armv8.0 architecture, that is the S-EL1&0 regime). It is RES1
+ * when stage 1 translations can only support one VA range.
+ */
+#define AP2_SHIFT U(0x7)
+#define AP2_RO ULL(0x1)
+#define AP2_RW ULL(0x0)
+
+#define AP1_SHIFT U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED ULL(0x0)
+#define AP1_RES1 ULL(0x1)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO (AP2_RO << 5)
+#define AP_RW (AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED (AP1_ACCESS_UNPRIVILEGED << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
+#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
+#define NS (U(0x1) << 3)
+#define ATTR_NON_CACHEABLE_INDEX ULL(0x2)
+#define ATTR_DEVICE_INDEX ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX ULL(0x0)
+#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
+
+/* Normal Memory, Outer Non-cacheable, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_NC, MAIR_NORM_NC)
+/* Device-nGnRE */
+#define ATTR_DEVICE MAIR_DEV_nGnRE
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+#define MAIR_ATTR_SET(attr, index) ((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK U(0x3)
+#define ATTR_INDEX_GET(attr) (((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Shift values for the attributes fields in a block or page descriptor.
+ * See section D4.3.3 in the ARMv8-A ARM (issue B.a).
+ */
+
+/* Memory attributes index field, AttrIndx[2:0]. */
+#define ATTR_INDEX_SHIFT 2
+/* Non-secure bit, NS. */
+#define NS_SHIFT 5
+/* Shareability field, SH[1:0] */
+#define SHAREABILITY_SHIFT 8
+/* The Access Flag, AF. */
+#define ACCESS_FLAG_SHIFT 10
+/* The not global bit, nG. */
+#define NOT_GLOBAL_SHIFT 11
+/* Contiguous hint bit. */
+#define CONT_HINT_SHIFT 52
+/* Execute-never bits, XN. */
+#define PXN_SHIFT 53
+#define XN_SHIFT 54
+#define UXN_SHIFT XN_SHIFT
+
+/*
+ * Stage 2 translation Lower attributes
+ */
+#define S2TTE_AP_SHIFT 6
+#define S2TTE_AP_RW (3UL << S2TTE_AP_SHIFT)
+
+#define S2TTE_SH_SHIFT 8
+#define S2TTE_SH_MASK (3UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_NS (0UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_OS (2UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_IS (3UL << S2TTE_SH_SHIFT)
+
+/*
+ * Attributes when FEAT_S2FWB is enabled at EL2 (HCR_EL2.FWB == 1).
+ * For Normal WB cacheability attribute, set bit[4] to 1 and bits[3:2] to 0b10.
+ */
+#define S2TTE_MEMATTR_FWB_NORMAL_WB ((1UL << 4) | (2UL << 2))
+#define S2TTE_ATTR_FWB_WB_RW (S2TTE_MEMATTR_FWB_NORMAL_WB | S2TTE_AP_RW | \
+ S2TTE_SH_IS)
+
+#endif /* XLAT_TABLES_DEFS_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2.h b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 0000000..0fe388b
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
+
+#ifndef __ASSEMBLY__
+#include <stddef.h>
+#include <stdint.h>
+#include <xlat_mmu_helpers.h>
+
+/*
+ * Default granularity size for an mmap_region_t.
+ * Useful when no specific granularity is required.
+ *
+ * By default, choose the biggest possible block size allowed by the
+ * architectural state and granule size in order to minimize the number of page
+ * tables required for the mapping.
+ */
+#define REGION_DEFAULT_GRANULARITY XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)
+
+/* Helper macro to define an mmap_region_t. */
+#define MAP_REGION(_pa, _va, _sz, _attr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+
+/* Helper macro to define an mmap_region_t with an identity mapping. */
+#define MAP_REGION_FLAT(_adr, _sz, _attr) \
+ MAP_REGION(_adr, _adr, _sz, _attr)
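+
+/*
+ * For example, identity-mapping a device region (the address below is
+ * illustrative):
+ *
+ *   MAP_REGION_FLAT(0x1c090000, 0x1000, MT_DEVICE | MT_RW | MT_SECURE)
+ */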
+
+/*
+ * Helper macro to define entries for mmap_region_t. It allows defining 'pa'
+ * and sets 'va' to 0 for each region. To be used with mmap_add_alloc_va().
+ */
+#define MAP_REGION_ALLOC_VA(pa, sz, attr) MAP_REGION(pa, 0, sz, attr)
+
+/*
+ * Helper macro to define an mmap_region_t to map with the desired granularity
+ * of translation tables.
+ *
+ * The granularity value passed to this macro must be a valid block or page
+ * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
+ * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
+ * is free to choose the granularity for this region. In this case, it is
+ * equivalent to the MAP_REGION() macro.
+ */
+#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+
+/*
+ * Shifts and masks to access fields of an mmap attribute
+ */
+#define MT_TYPE_MASK U(0x7)
+#define MT_TYPE(_attr) ((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT U(5)
+/* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
+#define MT_USER_SHIFT U(6)
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE U(0)
+#define MT_NON_CACHEABLE U(1)
+#define MT_MEMORY U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
+
+#define MT_RO (U(0) << MT_PERM_SHIFT)
+#define MT_RW (U(1) << MT_PERM_SHIFT)
+
+#define MT_SECURE (U(0) << MT_SEC_SHIFT)
+#define MT_NS (U(1) << MT_SEC_SHIFT)
+
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ * - Device memory is always marked as execute-never.
+ * - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE (U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER (U(1) << MT_EXECUTE_SHIFT)
+
+/*
+ * When mapping a region at EL0 or EL1, this attribute will be used to determine
+ * if a User mapping (EL0) will be created or a Privileged mapping (EL1).
+ */
+#define MT_USER (U(1) << MT_USER_SHIFT)
+#define MT_PRIVILEGED (U(0) << MT_USER_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+ unsigned long long base_pa;
+ uintptr_t base_va;
+ size_t size;
+ unsigned int attr;
+ /* Desired granularity. See the MAP_REGION2() macro for more details. */
+ size_t granularity;
+} mmap_region_t;
+
+/*
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
+ */
+#define EL1_EL0_REGIME 1
+#define EL2_REGIME 2
+#define EL3_REGIME 3
+#define EL_REGIME_INVALID -1
+
+/*
+ * Declare the translation context type.
+ * Its definition is private.
+ */
+typedef struct xlat_ctx xlat_ctx_t;
+
+/*
+ * Statically allocate a translation context and associated structures. Also
+ * initialize them.
+ *
+ * _ctx_name:
+ * Prefix for the translation context variable.
+ * E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
+ * Useful to distinguish multiple contexts from one another.
+ *
+ * _mmap_count:
+ * Number of mmap_region_t to allocate.
+ * Would typically be MAX_MMAP_REGIONS for the translation context describing
+ * the software image currently executing.
+ *
+ * _xlat_tables_count:
+ * Number of sub-translation tables to allocate.
+ * Would typically be MAX_XLAT_TABLES for the translation context describing
+ * the software image currently executing.
+ * Note that this is only for sub-tables; at the initial lookup level, there
+ * is always a single table.
+ *
+ * _virt_addr_space_size, _phy_addr_space_size:
+ * Size (in bytes) of the virtual (resp. physical) address space.
+ * Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
+ * (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
+ * software image currently executing.
+ */
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ EL_REGIME_INVALID, "xlat_table")
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
+ *
+ * _xlat_regime:
+ * Specify the translation regime managed by this xlat_ctx_t instance. The
+ * values are the ones from the EL*_REGIME definitions.
+ *
+ * _section_name:
+ * Specify the name of the section where the translation tables have to be
+ * placed by the linker.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size, \
+ _xlat_regime, _section_name) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ (_xlat_regime), (_section_name))
+
+/******************************************************************************
+ * Generic translation table APIs.
+ * Each API comes in 2 variants:
+ * - one that acts on the current translation context for this software image
+ * - another that acts on the given translation context instead. This variant
+ * is named after the 1st version, with an additional '_ctx' suffix.
+ *****************************************************************************/
+
+/*
+ * Initialize translation tables from the current list of mmap regions. Calling
+ * this function marks the transition point after which static regions can no
+ * longer be added.
+ */
+void init_xlat_tables(void);
+void init_xlat_tables_ctx(xlat_ctx_t *ctx);
+
+/*
+ * Fill all fields of a dynamic translation tables context. It must be done
+ * either statically with REGISTER_XLAT_CONTEXT() or at runtime with this
+ * function.
+ */
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+ uintptr_t va_max, struct mmap_region *mmap,
+ unsigned int mmap_num, uint64_t **tables,
+ unsigned int tables_num, uint64_t *base_table,
+ int xlat_regime, int *mapped_regions);
+
+/*
+ * Add a static region with defined base PA and base VA. This function can only
+ * be used before initializing the translation tables. The region cannot be
+ * removed afterwards.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This
+ * function can only be used before initializing the translation tables. The
+ * regions cannot be removed afterwards.
+ */
+void mmap_add(const mmap_region_t *mm);
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add a region with defined base PA. Returns the base VA calculated using
+ * the highest existing region in the mmap array, even if it fails to
+ * allocate the region.
+ */
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA, and fill in the base
+ * VA field of each struct in the array. This function can only be used before
+ * initializing the translation tables. The regions cannot be removed
+ * afterwards.
+ */
+void mmap_add_alloc_va(mmap_region_t *mm);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Add a dynamic region with defined base PA and base VA. This type of region
+ * can be added and removed even after the translation tables are initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: Memory limits were surpassed.
+ * ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ * EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
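+
+/*
+ * Illustrative usage (the addresses are hypothetical):
+ *
+ *   int ret = mmap_add_dynamic_region(0x88000000ULL, 0x88000000,
+ *                                     PAGE_SIZE, MT_MEMORY | MT_RW);
+ *   if (ret != 0) { ... handle the error ... }
+ */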
+
+/*
+ * Add a dynamic region with defined base PA. Returns base VA calculated using
+ * the highest existing region in the mmap array even if it fails to allocate
+ * the region.
+ *
+ * mmap_add_dynamic_region_alloc_va() returns the allocated VA in 'base_va'.
+ * mmap_add_dynamic_region_alloc_va_ctx() returns it in 'mm->base_va'.
+ *
+ * It returns the same error values as mmap_add_dynamic_region().
+ */
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the translation tables are
+ * initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: The specified region wasn't found.
+ * EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size);
+
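+/*
+ * Minimal usage sketch, assuming the translation tables are initialized and
+ * the attribute values below suit the platform (illustrative only):
+ *
+ *   int ret = mmap_add_dynamic_region(0x90000000ULL, 0x90000000U, SZ_4K,
+ *                                     MT_DEVICE | MT_RW | MT_SECURE);
+ *   if (ret != 0) {
+ *           ERROR("map failed: %d\n", ret);
+ *   }
+ *
+ *   ret = mmap_remove_dynamic_region(0x90000000U, SZ_4K);
+ */
+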
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Change the memory attributes of the memory region starting from a given
+ * virtual address in a set of translation tables.
+ *
+ * This function can only be used after the translation tables have been
+ * initialized.
+ *
+ * The base address of the memory region must be aligned on a page boundary.
+ * The size of this memory region must be a multiple of a page size.
+ * The memory region must be already mapped by the given translation tables
+ * and it must be mapped at the granularity of a page.
+ *
+ * Return 0 on success, a negative value on error.
+ *
+ * In case of error, the memory attributes remain unchanged and this function
+ * has no effect.
+ *
+ * ctx:
+ * Translation context to work on.
+ * base_va:
+ * Virtual address of the 1st page to change the attributes of.
+ * size:
+ * Size in bytes of the memory region.
+ * attr:
+ * New attributes of the page tables. The attributes that can be changed are
+ * data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
+ * and user/privileged access (MT_USER/MT_PRIVILEGED) in the case of contexts
+ * that are used in the EL1&0 translation regime. Also, note that this
+ * function does not allow remapping a region as RW and executable, nor
+ * remapping device memory as executable.
+ *
+ * NOTE: The caller of this function must be able to write to the translation
+ * tables, i.e. the memory where they are stored must be mapped with read-write
+ * access permissions. This function assumes it is the case. If this is not
+ * the case then this function might trigger a data abort exception.
+ *
+ * NOTE2: The caller is responsible for making sure that the targeted
+ * translation tables are not modified by any other code while this function is
+ * executing.
+ */
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);
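+
+/*
+ * Usage sketch: make one already-mapped page read-only and non-executable in
+ * the currently active context (the address is an illustrative assumption):
+ *
+ *   int ret = xlat_change_mem_attributes(0x80000000U, PAGE_SIZE,
+ *                                        MT_RO | MT_EXECUTE_NEVER);
+ */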
+
+/*
+ * Query the memory attributes of a memory page in a set of translation tables.
+ *
+ * Return 0 on success, a negative error code on error.
+ * On success, the attributes are stored into *attr.
+ *
+ * ctx
+ * Translation context to work on.
+ * base_va
+ * Virtual address of the page to get the attributes of.
+ * There are no alignment restrictions on this address. The attributes of the
+ * memory page it lies within are returned.
+ * attr
+ * Output parameter where to store the attributes of the targeted memory page.
+ */
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
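+
+/*
+ * Usage sketch (the address is an illustrative assumption; MT_RW is the
+ * permission bit tested):
+ *
+ *   uint32_t attr;
+ *
+ *   if (xlat_get_mem_attributes(0x80000000U, &attr) == 0) {
+ *           bool writable = ((attr & MT_RW) != 0U);
+ *   }
+ */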
+
+#endif /*__ASSEMBLY__*/
+#endif /* XLAT_TABLES_V2_H */
diff --git a/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2_helpers.h b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2_helpers.h
new file mode 100644
index 0000000..aa2bd66
--- /dev/null
+++ b/spm/scmi/include/ext/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains internal definitions that are not supposed to be
+ * used outside of this library code.
+ */
+
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H
+
+#ifndef XLAT_TABLES_V2_H
+#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables_defs.h>
+
+/* Forward declaration */
+struct mmap_region;
+
+/*
+ * Helper macro to define an mmap_region_t. This macro allows all the fields
+ * of the structure to be specified, but its parameter list is not guaranteed
+ * to remain stable as members are added to mmap_region_t.
+ */
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
+ { \
+ .base_pa = (_pa), \
+ .base_va = (_va), \
+ .size = (_sz), \
+ .attr = (_attr), \
+ .granularity = (_gr), \
+ }
+
+/* Struct that holds all information about the translation tables. */
+struct xlat_ctx {
+ /*
+ * Max allowed Virtual and Physical Addresses.
+ */
+ unsigned long long pa_max_address;
+ uintptr_t va_max_address;
+
+ /*
+ * Array of all memory regions stored in order of ascending end address
+ * and ascending size to simplify the code that allows overlapping
+ * regions. The list is terminated by the first entry with size == 0.
+ * The max size of the list is stored in `mmap_num`. `mmap` points to an
+ * array of mmap_num + 1 elements, so that there is space for the final
+ * null entry.
+ */
+ struct mmap_region *mmap;
+ int mmap_num;
+
+ /*
+ * Array of finer-grain translation tables.
+ * For example, if the initial lookup level is 1 then this array would
+ * contain both level-2 and level-3 entries.
+ */
+ uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+ int tables_num;
+	/*
+	 * Keep track of how many regions are mapped in each table. The base
+	 * table can't be unmapped, so there is no need to track it.
+	 */
+#if PLAT_XLAT_TABLES_DYNAMIC
+ int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+ int next_table;
+
+ /*
+	 * Base translation table. It doesn't need to have the same number of
+	 * entries as the ones used for other levels.
+ */
+ uint64_t *base_table;
+ unsigned int base_table_entries;
+
+ /*
+ * Max Physical and Virtual addresses currently in use by the
+ * translation tables. These might get updated as we map/unmap memory
+ * regions but they will never go beyond pa/va_max_address.
+ */
+ unsigned long long max_pa;
+ uintptr_t max_va;
+
+ /* Level of the base translation table. */
+ unsigned int base_level;
+
+ /* Set to true when the translation tables are initialized. */
+ bool initialized;
+
+ /*
+ * Translation regime managed by this xlat_ctx_t. It should be one of
+ * the EL*_REGIME defines.
+ */
+ int xlat_regime;
+};
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ static int _ctx_name##_mapped_regions[_xlat_tables_count];
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .tables_mapped_regions = _ctx_name##_mapped_regions,
+#else
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ /* do nothing */
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ /* do nothing */
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, _virt_addr_space_size, \
+ _phy_addr_space_size, _xlat_regime, _section_name)\
+ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
+ assert_invalid_physical_addr_space_sizefor_##_ctx_name);\
+ \
+ static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
+ \
+ static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
+ [XLAT_TABLE_ENTRIES] \
+ __aligned(XLAT_TABLE_SIZE) __section(_section_name); \
+ \
+ static uint64_t _ctx_name##_base_xlat_table \
+ [GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
+ __aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
+ * sizeof(uint64_t)); \
+ \
+ XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ \
+ static xlat_ctx_t _ctx_name##_xlat_ctx = { \
+ .va_max_address = (_virt_addr_space_size) - 1UL, \
+ .pa_max_address = (_phy_addr_space_size) - 1ULL, \
+ .mmap = _ctx_name##_mmap, \
+ .mmap_num = (_mmap_count), \
+ .base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
+ .base_table = _ctx_name##_base_xlat_table, \
+ .base_table_entries = \
+ GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
+ .tables = _ctx_name##_xlat_tables, \
+ .tables_num = _xlat_tables_count, \
+ XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .xlat_regime = (_xlat_regime), \
+ .max_pa = 0U, \
+ .max_va = 0U, \
+ .next_table = 0, \
+ .initialized = false, \
+ }
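+
+/*
+ * Illustrative expansion of the macro above; the context name, table counts,
+ * address-space sizes, regime and section name are arbitrary example values
+ * (EL1_EL0_REGIME stands for one of the EL*_REGIME identifiers):
+ *
+ *   REGISTER_XLAT_CONTEXT_FULL_SPEC(sp, 8, 4, (1ULL << 32), (1ULL << 32),
+ *                                   EL1_EL0_REGIME, "xlat_table");
+ *
+ * This instantiates sp_mmap[9], sp_xlat_tables[4][XLAT_TABLE_ENTRIES], a
+ * suitably aligned sp_base_xlat_table and the sp_xlat_ctx structure that
+ * ties them all together.
+ */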
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* XLAT_TABLES_V2_HELPERS_H */
diff --git a/spm/scmi/include/ext/plat/arm/common/plat_arm.h b/spm/scmi/include/ext/plat/arm/common/plat_arm.h
new file mode 100644
index 0000000..3b0b5a6
--- /dev/null
+++ b/spm/scmi/include/ext/plat/arm/common/plat_arm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_ARM_H__
+#define __PLAT_ARM_H__
+
+/*
+ * Initialises the IO.
+ * Returns: IO_SUCCESS on success, or one of IO_FAIL, IO_NOT_SUPPORTED and
+ * IO_RESOURCES_EXHAUSTED on error.
+ */
+int arm_io_setup(void);
+
+/* Initialises the IO and the GIC. */
+void arm_platform_setup(void);
+
+/*******************************************************************************
+ * ARM platforms porting interfaces are located below.
+ ******************************************************************************/
+
+/* Initialises the Generic Interrupt Controller (GIC). */
+void plat_arm_gic_init(void);
+
+#endif /* __PLAT_ARM_H__ */
diff --git a/spm/scmi/include/ext/plat/common/common_def.h b/spm/scmi/include/ext/plat/common/common_def.h
new file mode 100644
index 0000000..844c0c8
--- /dev/null
+++ b/spm/scmi/include/ext/plat/common/common_def.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _COMMON_DEF_H_
+#define _COMMON_DEF_H_
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* _COMMON_DEF_H_ */
diff --git a/spm/scmi/include/ext/plat/common/plat_topology.h b/spm/scmi/include/ext/plat/common/plat_topology.h
new file mode 100644
index 0000000..fbae878
--- /dev/null
+++ b/spm/scmi/include/ext/plat/common/plat_topology.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_TOPOLOGY_H__
+#define __PLAT_TOPOLOGY_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*
+ * This is the initial value of the power domain index when used
+ * as argument to the tftf topology helpers. They are also used
+ * to indicate the end of iterative topology navigation when returned
+ * by the tftf topology helpers.
+ */
+#define PWR_DOMAIN_INIT ((unsigned int) -1)
+
+/*
+ * Return the total number of clusters in the system.
+ * Currently there is a one-to-one correspondence between power levels and
+ * affinity levels, hence the cluster power level corresponds to affinity
+ * level 1.
+ */
+#define tftf_get_total_clusters_count() tftf_get_total_aff_count(1)
+
+/*
+ * Return the total number of CPUs in the system (across all clusters).
+ * Currently there is a one-to-one correspondence between power levels and
+ * affinity levels, hence the CPU power level corresponds to affinity level 0.
+ */
+#define tftf_get_total_cpus_count() tftf_get_total_aff_count(0)
+
+/*
+ * Converts a 'core_pos' into an MPIDR. The 'core_pos' is a unique number
+ * corresponding to a CPU, as returned by the platform_get_core_pos() API.
+ */
+#define tftf_core_pos_to_mpidr(core_pos) \
+ tftf_get_mpidr_from_node(core_pos + tftf_pwr_domain_start_idx[0])
+
+/*
+ * The following array stores the start index of each level in the power
+ * domain topology tree.
+ */
+extern unsigned int tftf_pwr_domain_start_idx[PLATFORM_MAX_AFFLVL + 1];
+
+/*
+ * The following data structure represents a TFTF power domain node.
+ */
+typedef struct tftf_pwr_domain_node {
+ /*
+ * Array index of the first CPU in the topology array for which this
+ * power domain is the parent. If this power domain is a CPU, then
+ * `cpu_start_node` will be its own index in the power domain
+ * topology array.
+ */
+ unsigned int cpu_start_node;
+
+ /*
+	 * Number of CPU power domains which belong to this power domain,
+	 * i.e. all the CPUs in the range 'cpu_start_node ->
+	 * cpu_start_node + ncpus - 1' belong to this power domain.
+	 * If this power domain is a CPU, then 'ncpus' will be 1.
+ */
+ unsigned int ncpus;
+
+ /* Valid only for CPU power domains */
+ unsigned int mpidr;
+
+ /* Index of the parent power domain node */
+ unsigned int parent_node;
+
+ /*
+ * The level of this power domain node in the power domain topology
+ * tree. It could correspond to the affinity level but the platform
+ * could have power levels which do not match affinity levels.
+ */
+ unsigned int level;
+
+ /*
+ * The 'is_present' field is used to cater for power domains
+ * which are absent.
+ */
+ unsigned char is_present;
+} tftf_pwr_domain_node_t;
+
+extern tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
+
+/*
+ * Detect and store the platform topology so that test cases can query it later.
+ */
+void tftf_init_topology(void);
+
+/*
+ * Return the number of affinity instances implemented at the affinity level
+ * passed as an argument. This function returns 0 for any other affinity
+ * level higher than that supported by the platform.
+ */
+unsigned int tftf_get_total_aff_count(unsigned int aff_lvl);
+
+/*
+ * Returns the index of the next power domain after `pwr_domain_idx`
+ * in the topology tree at the same `aff_lvl`. The `pwr_domain_idx`
+ * has to be initialized to PWR_DOMAIN_INIT to get the first entry.
+ * It returns PWR_DOMAIN_INIT if none is found.
+ */
+unsigned int tftf_get_next_peer_domain(unsigned int pwr_domain_idx,
+ unsigned int pwr_lvl);
+
+/*
+ * Returns the index of the next CPU after the current CPU `cpu_node`
+ * which belongs to the power domain `pwr_domain_idx`. The `cpu_node`
+ * has to be initialized to PWR_DOMAIN_INIT to get the first entry.
+ * It returns PWR_DOMAIN_INIT if none is found.
+ */
+unsigned int tftf_get_next_cpu_in_pwr_domain(unsigned int pwr_domain_idx,
+ unsigned int cpu_node);
+
+/*
+ * Return the node index of the next CPU after 'cpu_node' in the topology tree.
+ * Skip absent CPUs.
+ * cpu_node: Node index of the current CPU.
+ */
+unsigned int tftf_topology_next_cpu(unsigned int cpu_node);
+
+/*
+ * Iterate over every CPU. Skip absent CPUs.
+ * cpu: unsigned integer corresponding to the index of the cpu in
+ * the topology array.
+ */
+#define for_each_cpu(cpu) \
+ for (cpu = tftf_topology_next_cpu(PWR_DOMAIN_INIT); \
+ cpu != PWR_DOMAIN_INIT; \
+ cpu = tftf_topology_next_cpu(cpu))
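+
+/*
+ * Usage sketch: walk every present CPU and fetch its MPIDR (illustrative):
+ *
+ *   unsigned int cpu_node;
+ *
+ *   for_each_cpu(cpu_node) {
+ *           unsigned int mpid = tftf_get_mpidr_from_node(cpu_node);
+ *   }
+ */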
+
+/*
+ * Iterate over every power domain idx for a given level.
+ * - idx: unsigned integer corresponding to the power domain index.
+ * - lvl: level
+ */
+#define for_each_power_domain_idx(idx, lvl) \
+ for (idx = tftf_get_next_peer_domain(PWR_DOMAIN_INIT, (lvl)); \
+ idx != PWR_DOMAIN_INIT; \
+ idx = tftf_get_next_peer_domain(idx, (lvl)))
+
+/*
+ * Iterate over every CPU in a power domain idx.
+ * - cpu_idx: CPU index.
+ * - pwr_domain_idx: unsigned integer corresponding to the power domain index.
+ */
+#define for_each_cpu_in_power_domain(cpu_idx, pwr_domain_idx) \
+ for (cpu_idx = tftf_get_next_cpu_in_pwr_domain( \
+ (pwr_domain_idx), PWR_DOMAIN_INIT); \
+ cpu_idx != PWR_DOMAIN_INIT; \
+ cpu_idx = tftf_get_next_cpu_in_pwr_domain( \
+ (pwr_domain_idx), cpu_idx))
+
+/*
+ * Returns the MPIDR of the CPU power domain node indexed by `cpu_node`
+ * or INVALID_MPID if it is absent.
+ */
+unsigned int tftf_get_mpidr_from_node(unsigned int cpu_node);
+
+
+/*
+ * Returns the index corresponding to the parent power domain at `pwrlvl` of the
+ * CPU specified by `mpidr`. Returns PWR_DOMAIN_INIT if any of the input
+ * arguments are incorrect.
+ */
+unsigned int tftf_get_parent_node_from_mpidr(unsigned int mpidr,
+ unsigned int pwrlvl);
+
+
+/*
+ * Query the platform topology to find another CPU than the one specified
+ * as an argument.
+ * Return the MPID of this other CPU, or INVALID_MPID if none could be found.
+ */
+unsigned int tftf_find_any_cpu_other_than(unsigned int exclude_mpid);
+
+/*
+ * Query the platform topology to find a random CPU other than the one specified
+ * as an argument.
+ * The difference between this function and tftf_find_any_cpu_other_than is
+ * the randomness in selecting a CPU.
+ * Return the MPID of this other CPU, or INVALID_MPID if none could be found.
+ */
+unsigned int tftf_find_random_cpu_other_than(unsigned int exclude_mpid);
+
+#endif /* __PLAT_TOPOLOGY_H__ */
diff --git a/spm/scmi/include/ffa_endpoints.h b/spm/scmi/include/ffa_endpoints.h
new file mode 100644
index 0000000..26297bc
--- /dev/null
+++ b/spm/scmi/include/ffa_endpoints.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FFA_ENDPOINTS_H
+#define FFA_ENDPOINTS_H
+
+#include <platform_def.h>
+
+/* UUID of cactus SPs as defined in the respective manifests. */
+#define PRIMARY_UUID {0x1e67b5b4, 0xe14f904a, 0x13fb1fb8, 0xcbdae1da}
+#define SECONDARY_UUID {0x092358d1, 0xb94723f0, 0x64447c82, 0xc88f57f5}
+#define TERTIARY_UUID {0x735cb579, 0xb9448c1d, 0xe1619385, 0xd2d80a77}
+#define IVY_UUID {0xd883baea, 0xaf4eafba, 0xfdf74481, 0xa744e5cb}
+#define EL3_SPMD_LP_UUID {0xe98e43ad, 0xb7db524f, 0x47a3bf57, 0x1588f4e3}
+
+/* vcpu_count of cactus SPs. */
+#define PRIMARY_EXEC_CTX_COUNT PLATFORM_CORE_COUNT
+#define SECONDARY_EXEC_CTX_COUNT PLATFORM_CORE_COUNT
+#define TERTIARY_EXEC_CTX_COUNT (1)
+#define IVY_EXEC_CTX_COUNT (1)
+#define EL3_SPMD_LP_EXEC_CTX_COUNT (1)
+
+/* UUID of OPTEE SP as defined in the respective manifest. */
+#define OPTEE_UUID {0x486178e0, 0xe7f811e3, 0xbc5e0002, 0xa5d5c51b}
+
+#define OPTEE_FFA_GET_API_VERSION (0)
+#define OPTEE_FFA_GET_OS_VERSION (1)
+#define OPTEE_FFA_GET_OS_VERSION_MAJOR (3)
+#define OPTEE_FFA_GET_OS_VERSION_MINOR (10)
+
+#endif /* FFA_ENDPOINTS_H */
diff --git a/spm/scmi/include/ffa_helpers.h b/spm/scmi/include/ffa_helpers.h
new file mode 100644
index 0000000..a7cdcb5
--- /dev/null
+++ b/spm/scmi/include/ffa_helpers.h
@@ -0,0 +1,762 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FFA_HELPERS_H
+#define FFA_HELPERS_H
+
+#include <ffa_svc.h>
+#include <tftf_lib.h>
+#include <utils_def.h>
+
+/* This error code must be different from the ones used by FFA. */
+#define FFA_TFTF_ERROR -42
+
+typedef unsigned short ffa_id_t;
+typedef unsigned short ffa_vm_count_t;
+typedef unsigned short ffa_vcpu_count_t;
+typedef uint64_t ffa_memory_handle_t;
+/** Flags to indicate properties of receivers during memory region retrieval. */
+typedef uint8_t ffa_memory_receiver_flags_t;
+
+struct ffa_uuid {
+ uint32_t uuid[4];
+};
+
+/** Length in bytes of the name in boot information descriptor. */
+#define FFA_BOOT_INFO_NAME_LEN 16
+
+/**
+ * The FF-A boot info descriptor, as defined in table 5.8 of section 5.4.1 of
+ * the FF-A v1.1 EAC0 specification.
+ */
+struct ffa_boot_info_desc {
+ char name[FFA_BOOT_INFO_NAME_LEN];
+ uint8_t type;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t size;
+ uint64_t content;
+};
+
+/** FF-A boot information type mask. */
+#define FFA_BOOT_INFO_TYPE_SHIFT 7
+#define FFA_BOOT_INFO_TYPE_MASK (0x1U << FFA_BOOT_INFO_TYPE_SHIFT)
+#define FFA_BOOT_INFO_TYPE_STD 0U
+#define FFA_BOOT_INFO_TYPE_IMPDEF 1U
+
+/** Standard boot info type IDs. */
+#define FFA_BOOT_INFO_TYPE_ID_MASK 0x7FU
+#define FFA_BOOT_INFO_TYPE_ID_FDT 0U
+#define FFA_BOOT_INFO_TYPE_ID_HOB 1U
+
+/** FF-A Boot Info descriptors flags. */
+#define FFA_BOOT_INFO_FLAG_MBZ_MASK 0xFFF0U
+
+/** Bits [1:0] encode the format of the name field in ffa_boot_info_desc. */
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT 0U
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK \
+ (0x3U << FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT)
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING 0x0U
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID 0x1U
+
+/** Bits [3:2] encode the format of the content field in ffa_boot_info_desc. */
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT 2
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK \
+ (0x3U << FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT)
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_VALUE 0x1U
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR 0x0U
+
+static inline uint16_t ffa_boot_info_content_format(
+ struct ffa_boot_info_desc *desc)
+{
+ return (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
+ FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
+}
+
+static inline uint16_t ffa_boot_info_name_format(
+ struct ffa_boot_info_desc *desc)
+{
+ return (desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK) >>
+ FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT;
+}
+
+static inline uint8_t ffa_boot_info_type_id(struct ffa_boot_info_desc *desc)
+{
+ return desc->type & FFA_BOOT_INFO_TYPE_ID_MASK;
+}
+
+static inline uint8_t ffa_boot_info_type(struct ffa_boot_info_desc *desc)
+{
+ return (desc->type & FFA_BOOT_INFO_TYPE_MASK) >>
+ FFA_BOOT_INFO_TYPE_SHIFT;
+}
+
+/** Length in bytes of the signature in the boot descriptor. */
+#define FFA_BOOT_INFO_HEADER_SIGNATURE_LEN 4
+
+/**
+ * The FF-A boot information header, as defined in table 5.9 of section 5.4.2
+ * of the FF-A v1.1 EAC0 specification.
+ */
+struct ffa_boot_info_header {
+ uint32_t signature;
+ uint32_t version;
+ uint32_t info_blob_size;
+ uint32_t desc_size;
+ uint32_t desc_count;
+ uint32_t desc_offset;
+ uint64_t reserved;
+ struct ffa_boot_info_desc boot_info[];
+};
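+
+/*
+ * Usage sketch: walk the boot information blob passed to the partition.
+ * 'hdr' is assumed to point to a valid ffa_boot_info_header, and the
+ * descriptors are assumed to be packed at hdr->desc_offset (illustrative):
+ *
+ *   struct ffa_boot_info_desc *desc =
+ *           (struct ffa_boot_info_desc *)((uint8_t *)hdr + hdr->desc_offset);
+ *
+ *   for (uint32_t i = 0U; i < hdr->desc_count; i++) {
+ *           if (ffa_boot_info_type(&desc[i]) == FFA_BOOT_INFO_TYPE_STD &&
+ *               ffa_boot_info_type_id(&desc[i]) ==
+ *                                       FFA_BOOT_INFO_TYPE_ID_FDT)
+ *                   parse_fdt((void *)desc[i].content);
+ *   }
+ *
+ * where parse_fdt() is a hypothetical consumer of the device tree blob.
+ */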
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/**
+ * FF-A Feature ID, to be used with interface FFA_FEATURES.
+ * As defined in the FF-A v1.1 Beta specification, table 13.10, in section
+ * 13.2.
+ */
+
+/** Query interrupt ID of Notification Pending Interrupt. */
+#define FFA_FEATURE_NPI 0x1U
+
+/** Query interrupt ID of Schedule Receiver Interrupt. */
+#define FFA_FEATURE_SRI 0x2U
+
+/** Query interrupt ID of the Managed Exit Interrupt. */
+#define FFA_FEATURE_MEI 0x3U
+
+/** Partition property: partition supports receipt of direct requests. */
+#define FFA_PARTITION_DIRECT_REQ_RECV (UINT32_C(1) << 0)
+
+/** Partition property: partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_REQ_SEND (UINT32_C(1) << 1)
+
+/** Partition property: partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG (UINT32_C(1) << 2)
+
+/** Partition property: partition can receive notifications. */
+#define FFA_PARTITION_NOTIFICATION (UINT32_C(1) << 3)
+
+/** Partition property: partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC (UINT32_C(1) << 8)
+
+/** Partition info descriptor as defined in FF-A v1.1 EAC0 Table 13.37 */
+struct ffa_partition_info {
+ /** The ID of the VM the information is about */
+ ffa_id_t id;
+ /** The number of execution contexts implemented by the partition */
+ uint16_t exec_context;
+ /** The Partition's properties, e.g. supported messaging methods */
+ uint32_t properties;
+ /** The uuid of the partition */
+ struct ffa_uuid uuid;
+};
+
+/**
+ * Bits[31:3] of partition properties must be zero for FF-A v1.0.
+ * This corresponds to table 8.25 "Partition information descriptor"
+ * in DEN0077A FF-A 1.0 REL specification.
+ */
+#define FFA_PARTITION_v1_0_RES_MASK (~(UINT32_C(0x7)))
+
+/**
+ * Partition info descriptor as defined in Table 8.25 of the v1.0
+ * FF-A Specification (DEN0077A).
+ */
+struct ffa_partition_info_v1_0 {
+ /** The ID of the VM the information is about */
+ ffa_id_t id;
+ /** The number of execution contexts implemented by the partition */
+ uint16_t exec_context;
+ /** The Partition's properties, e.g. supported messaging methods */
+ uint32_t properties;
+};
+
+struct ffa_value {
+ u_register_t fid;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+ u_register_t arg8;
+ u_register_t arg9;
+ u_register_t arg10;
+ u_register_t arg11;
+ u_register_t arg12;
+ u_register_t arg13;
+ u_register_t arg14;
+ u_register_t arg15;
+ u_register_t arg16;
+ u_register_t arg17;
+};
+
+/*
+ * Function to make an SMC or SVC service call, depending on the exception
+ * level of the SP.
+ */
+struct ffa_value ffa_service_call(struct ffa_value *args);
+
+/*
+ * Functions to trigger a service call.
+ *
+ * The arguments to pass through the service call must be stored in the
+ * ffa_value structure. The return values of the service call will be stored
+ * in the same structure (overwriting the input arguments).
+ *
+ * Return the first return value. It is equivalent to args.fid but is also
+ * provided as the return value for convenience.
+ */
+u_register_t ffa_svc(struct ffa_value *args);
+u_register_t ffa_smc(struct ffa_value *args);
+
+static inline uint32_t ffa_func_id(struct ffa_value val)
+{
+ return (uint32_t)val.fid;
+}
+
+static inline int32_t ffa_error_code(struct ffa_value val)
+{
+ return (int32_t)val.arg2;
+}
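+
+/*
+ * Usage sketch: issue FFA_ID_GET through the generic service call and check
+ * for an error return, using the accessors defined here (illustrative):
+ *
+ *   struct ffa_value args = { .fid = FFA_ID_GET };
+ *   struct ffa_value ret = ffa_service_call(&args);
+ *
+ *   if (ffa_func_id(ret) == FFA_ERROR) {
+ *           int32_t err = ffa_error_code(ret);
+ *   } else {
+ *           ffa_id_t own_id = ffa_endpoint_id(ret);
+ *   }
+ */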
+
+static inline ffa_id_t ffa_endpoint_id(struct ffa_value val)
+{
+	return (ffa_id_t)val.arg2 & 0xffff;
+}
+
+static inline uint32_t ffa_partition_info_count(struct ffa_value val)
+{
+ return (uint32_t)val.arg2;
+}
+
+static inline uint32_t ffa_partition_info_desc_size(struct ffa_value val)
+{
+ return (uint32_t)val.arg3;
+}
+
+static inline uint32_t ffa_feature_intid(struct ffa_value val)
+{
+ return (uint32_t)val.arg2;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_last_idx(
+ struct ffa_value args)
+{
+ return args.arg2 & 0xFFFF;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_curr_idx(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 16) & 0xFFFF;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_tag(struct ffa_value args)
+{
+ return (args.arg2 >> 32) & 0xFFFF;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_desc_size(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 48);
+}
+
+static inline uint32_t ffa_partition_info_regs_partition_count(
+ struct ffa_value args)
+{
+ return ffa_partition_info_regs_get_last_idx(args) + 1;
+}
+
+static inline uint32_t ffa_partition_info_regs_entry_count(
+ struct ffa_value args, uint16_t start_idx)
+{
+ return (ffa_partition_info_regs_get_curr_idx(args) - start_idx + 1);
+}
+
+static inline uint16_t ffa_partition_info_regs_entry_size(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 48) & 0xFFFFU;
+}
+
+typedef uint64_t ffa_notification_bitmap_t;
+
+#define FFA_NOTIFICATION(ID)	(UINT64_C(1) << (ID))
+
+#define MAX_FFA_NOTIFICATIONS UINT32_C(64)
+
+#define FFA_NOTIFICATIONS_FLAG_PER_VCPU UINT32_C(0x1 << 0)
+
+/** Flag to delay Schedule Receiver Interrupt. */
+#define FFA_NOTIFICATIONS_FLAG_DELAY_SRI UINT32_C(0x1 << 1)
+
+#define FFA_NOTIFICATIONS_FLAGS_VCPU_ID(id) UINT32_C((id & 0xFFFF) << 16)
+
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_SP UINT32_C(0x1 << 0)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_VM UINT32_C(0x1 << 1)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_SPM UINT32_C(0x1 << 2)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_HYP UINT32_C(0x1 << 3)
+
+/**
+ * The following is an SGI ID, that the SPMC configures as non-secure, as
+ * suggested by the FF-A v1.1 specification, in section 9.4.1.
+ */
+#define FFA_SCHEDULE_RECEIVER_INTERRUPT_ID 8
+
+#define FFA_NOTIFICATIONS_BITMAP(lo, hi)	\
+	((ffa_notification_bitmap_t)(lo) |	\
+	 (((ffa_notification_bitmap_t)(hi) << 32) & 0xFFFFFFFF00000000ULL))
+
+static inline ffa_notification_bitmap_t ffa_notifications_get_from_sp(
+ struct ffa_value val)
+{
+ return FFA_NOTIFICATIONS_BITMAP(val.arg2, val.arg3);
+}
+
+static inline ffa_notification_bitmap_t ffa_notifications_get_from_vm(
+ struct ffa_value val)
+{
+ return FFA_NOTIFICATIONS_BITMAP(val.arg4, val.arg5);
+}
+
+/*
+ * FFA_NOTIFICATION_INFO_GET is an SMC64 interface.
+ * The following macros are defined for SMC64 implementation.
+ */
+#define FFA_NOTIFICATIONS_INFO_GET_MAX_IDS 20U
+
+#define FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING UINT64_C(0x1)
+
+#define FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT 0x7U
+#define FFA_NOTIFICATIONS_LISTS_COUNT_MASK 0x1FU
+#define FFA_NOTIFICATIONS_LIST_SHIFT(l)	(2 * ((l) - 1) + 12)
+#define FFA_NOTIFICATIONS_LIST_SIZE_MASK 0x3U
+
+static inline uint32_t ffa_notifications_info_get_lists_count(
+ struct ffa_value ret)
+{
+ return (uint32_t)(ret.arg2 >> FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT)
+ & FFA_NOTIFICATIONS_LISTS_COUNT_MASK;
+}
+
+static inline uint32_t ffa_notifications_info_get_list_size(
+ struct ffa_value ret, uint32_t list)
+{
+ return (uint32_t)(ret.arg2 >> FFA_NOTIFICATIONS_LIST_SHIFT(list)) &
+ FFA_NOTIFICATIONS_LIST_SIZE_MASK;
+}
+
+static inline bool ffa_notifications_info_get_more_pending(struct ffa_value ret)
+{
+ return (ret.arg2 & FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING) != 0U;
+}
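+
+/*
+ * Usage sketch: decode the endpoint ID lists packed into the return values
+ * of FFA_NOTIFICATION_INFO_GET ('ret' is illustrative):
+ *
+ *   uint32_t lists = ffa_notifications_info_get_lists_count(ret);
+ *
+ *   for (uint32_t l = 1U; l <= lists; l++) {
+ *           uint32_t ids_in_list =
+ *                   ffa_notifications_info_get_list_size(ret, l);
+ *   }
+ *
+ * If ffa_notifications_info_get_more_pending(ret) is true, the interface
+ * must be invoked again to retrieve the remaining IDs.
+ */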
+
+enum ffa_data_access {
+ FFA_DATA_ACCESS_NOT_SPECIFIED,
+ FFA_DATA_ACCESS_RO,
+ FFA_DATA_ACCESS_RW,
+ FFA_DATA_ACCESS_RESERVED,
+};
+
+enum ffa_instruction_access {
+ FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
+ FFA_INSTRUCTION_ACCESS_NX,
+ FFA_INSTRUCTION_ACCESS_X,
+ FFA_INSTRUCTION_ACCESS_RESERVED,
+};
+
+enum ffa_memory_type {
+ FFA_MEMORY_NOT_SPECIFIED_MEM,
+ FFA_MEMORY_DEVICE_MEM,
+ FFA_MEMORY_NORMAL_MEM,
+};
+
+enum ffa_memory_cacheability {
+ FFA_MEMORY_CACHE_RESERVED = 0x0,
+ FFA_MEMORY_CACHE_NON_CACHEABLE = 0x1,
+ FFA_MEMORY_CACHE_RESERVED_1 = 0x2,
+ FFA_MEMORY_CACHE_WRITE_BACK = 0x3,
+ FFA_MEMORY_DEV_NGNRNE = 0x0,
+ FFA_MEMORY_DEV_NGNRE = 0x1,
+ FFA_MEMORY_DEV_NGRE = 0x2,
+ FFA_MEMORY_DEV_GRE = 0x3,
+};
+
+enum ffa_memory_shareability {
+ FFA_MEMORY_SHARE_NON_SHAREABLE,
+ FFA_MEMORY_SHARE_RESERVED,
+ FFA_MEMORY_OUTER_SHAREABLE,
+ FFA_MEMORY_INNER_SHAREABLE,
+};
+
+typedef uint8_t ffa_memory_access_permissions_t;
+
+/**
+ * This corresponds to table 10.18 of the FF-A v1.1 EAC0 specification, "Memory
+ * region attributes descriptor".
+ */
+typedef uint16_t ffa_memory_attributes_t;
+
+#define FFA_DATA_ACCESS_OFFSET (0x0U)
+#define FFA_DATA_ACCESS_MASK ((0x3U) << FFA_DATA_ACCESS_OFFSET)
+
+#define FFA_INSTRUCTION_ACCESS_OFFSET (0x2U)
+#define FFA_INSTRUCTION_ACCESS_MASK ((0x3U) << FFA_INSTRUCTION_ACCESS_OFFSET)
+
+#define FFA_MEMORY_TYPE_OFFSET (0x4U)
+#define FFA_MEMORY_TYPE_MASK ((0x3U) << FFA_MEMORY_TYPE_OFFSET)
+
+#define FFA_MEMORY_CACHEABILITY_OFFSET (0x2U)
+#define FFA_MEMORY_CACHEABILITY_MASK ((0x3U) << FFA_MEMORY_CACHEABILITY_OFFSET)
+
+#define FFA_MEMORY_SHAREABILITY_OFFSET (0x0U)
+#define FFA_MEMORY_SHAREABILITY_MASK ((0x3U) << FFA_MEMORY_SHAREABILITY_OFFSET)
+
+#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
+ static inline void ffa_set_##name##_attr(container_type *attr, \
+ const enum ffa_##name perm) \
+ { \
+ *attr = (*attr & ~(mask)) | ((perm << offset) & mask); \
+ }
+
+#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
+ static inline enum ffa_##name ffa_get_##name##_attr( \
+ container_type attr) \
+ { \
+ return (enum ffa_##name)((attr & mask) >> offset); \
+ }
+
+ATTR_FUNCTION_SET(data_access, ffa_memory_access_permissions_t,
+ FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
+ATTR_FUNCTION_GET(data_access, ffa_memory_access_permissions_t,
+ FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
+
+ATTR_FUNCTION_SET(instruction_access, ffa_memory_access_permissions_t,
+ FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
+ATTR_FUNCTION_GET(instruction_access, ffa_memory_access_permissions_t,
+ FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
+
+ATTR_FUNCTION_SET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
+ FFA_MEMORY_TYPE_MASK)
+ATTR_FUNCTION_GET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
+ FFA_MEMORY_TYPE_MASK)
+
+ATTR_FUNCTION_SET(memory_cacheability, ffa_memory_attributes_t,
+ FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
+ATTR_FUNCTION_GET(memory_cacheability, ffa_memory_attributes_t,
+ FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
+
+ATTR_FUNCTION_SET(memory_shareability, ffa_memory_attributes_t,
+ FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
+ATTR_FUNCTION_GET(memory_shareability, ffa_memory_attributes_t,
+ FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
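+
+/*
+ * Usage sketch for the generated accessors; the chosen values are
+ * illustrative:
+ *
+ *   ffa_memory_access_permissions_t perm = 0U;
+ *   ffa_memory_attributes_t attrs = 0U;
+ *
+ *   ffa_set_data_access_attr(&perm, FFA_DATA_ACCESS_RW);
+ *   ffa_set_memory_type_attr(&attrs, FFA_MEMORY_NORMAL_MEM);
+ *
+ *   assert(ffa_get_data_access_attr(perm) == FFA_DATA_ACCESS_RW);
+ */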
+
+#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK \
+ ((ffa_memory_handle_t)(UINT64_C(1) << 63))
+#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
+ ((ffa_memory_handle_t)(UINT64_C(1) << 63))
+#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
+
+/**
+ * A set of contiguous pages which is part of a memory region. This corresponds
+ * to table 10.14 of the FF-A v1.1 EAC0 specification, "Constituent memory
+ * region descriptor".
+ */
+struct ffa_memory_region_constituent {
+ /**
+ * The base IPA of the constituent memory region, aligned to 4 kiB page
+ * size granularity.
+ */
+ void *address;
+ /** The number of 4 kiB pages in the constituent memory region. */
+ uint32_t page_count;
+ /** Reserved field, must be 0. */
+ uint32_t reserved;
+};
+
+/**
+ * A set of pages comprising a memory region. This corresponds to table 10.13 of
+ * the FF-A v1.1 EAC0 specification, "Composite memory region descriptor".
+ */
+struct ffa_composite_memory_region {
+ /**
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `ffa_memory_region_constituent`.
+ */
+ uint32_t page_count;
+ /**
+ * The number of constituents (`ffa_memory_region_constituent`)
+ * included in this memory region range.
+ */
+ uint32_t constituent_count;
+ /** Reserved field, must be 0. */
+ uint64_t reserved_0;
+ /** An array of `constituent_count` memory region constituents. */
+ struct ffa_memory_region_constituent constituents[];
+};
+
+/**
+ * This corresponds to table "Memory access permissions descriptor" of the FFA
+ * 1.0 specification.
+ */
+struct ffa_memory_region_attributes {
+ /** The ID of the VM to which the memory is being given or shared. */
+ ffa_id_t receiver;
+ /**
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+ ffa_memory_access_permissions_t permissions;
+ /**
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+ ffa_memory_receiver_flags_t flags;
+};
+
+/** Flags to control the behaviour of a memory sharing transaction. */
+typedef uint32_t ffa_memory_region_flags_t;
+
+/**
+ * Clear memory region contents after unmapping it from the sender and before
+ * mapping it for any receiver.
+ */
+#define FFA_MEMORY_REGION_FLAG_CLEAR 0x1U
+
+/**
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_MEMORY_REGION_FLAG_TIME_SLICE 0x2U
+
+/**
+ * Whether the hypervisor should clear the memory region after the receiver
+ * relinquishes it or is aborted.
+ */
+#define FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH 0x4U
+
+#define FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK ((0x3U) << 3)
+#define FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED ((0x0U) << 3)
+#define FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE ((0x1U) << 3)
+#define FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND ((0x2U) << 3)
+#define FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)
+
+/** The maximum number of recipients a memory region may be sent to. */
+#define MAX_MEM_SHARE_RECIPIENTS 1U
+
+/**
+ * This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
+ * specification.
+ */
+struct ffa_memory_access {
+ struct ffa_memory_region_attributes receiver_permissions;
+ /**
+ * Offset in bytes from the start of the outer `ffa_memory_region` to
+ * an `ffa_composite_memory_region` struct.
+ */
+ uint32_t composite_memory_region_offset;
+ uint64_t reserved_0;
+};
+
+/**
+ * Information about a set of pages which are being shared. This corresponds to
+ * table 10.20 of the FF-A v1.1 EAC0 specification, "Lend, donate or share
+ * memory transaction descriptor". Note that it is also used for retrieve
+ * requests and responses.
+ */
+struct ffa_memory_region {
+ /**
+ * The ID of the VM which originally sent the memory region, i.e. the
+ * owner.
+ */
+ ffa_id_t sender;
+ ffa_memory_attributes_t attributes;
+ /** Flags to control behaviour of the transaction. */
+ ffa_memory_region_flags_t flags;
+ ffa_memory_handle_t handle;
+ /**
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ uint64_t tag;
+ /** Size of the memory access descriptor. */
+ uint32_t memory_access_desc_size;
+ /**
+ * The number of `ffa_memory_access` entries included in this
+ * transaction.
+ */
+ uint32_t receiver_count;
+ /**
+ * Offset of the 'receivers' field, which relates to the memory access
+ * descriptors.
+ */
+ uint32_t receivers_offset;
+ /** Reserved field (12 bytes) must be 0. */
+ uint32_t reserved[3];
+ /**
+ * An array of `receiver_count` endpoint memory access descriptors.
+ * Each one specifies a memory region offset, an endpoint and the
+ * attributes with which this memory region should be mapped in that
+ * endpoint's page table.
+ */
+ struct ffa_memory_access receivers[];
+};
+
+/**
+ * Descriptor used for FFA_MEM_RELINQUISH requests. This corresponds to table
+ * 16.25 of the FF-A v1.1 EAC0 specification, "Descriptor to relinquish a memory
+ * region".
+ */
+struct ffa_mem_relinquish {
+ ffa_memory_handle_t handle;
+ ffa_memory_region_flags_t flags;
+ uint32_t endpoint_count;
+ ffa_id_t endpoints[];
+};
+
+static inline ffa_memory_handle_t ffa_assemble_handle(uint32_t h1, uint32_t h2)
+{
+	return (ffa_memory_handle_t)h1 |
+	       ((ffa_memory_handle_t)h2 << 32);
+}
+
+static inline ffa_memory_handle_t ffa_mem_success_handle(struct ffa_value r)
+{
+ return ffa_assemble_handle(r.arg2, r.arg3);
+}
+
+/**
+ * Gets the `ffa_composite_memory_region` for the given receiver from an
+ * `ffa_memory_region`, or NULL if it is not valid.
+ */
+static inline struct ffa_composite_memory_region *
+ffa_memory_region_get_composite(struct ffa_memory_region *memory_region,
+ uint32_t receiver_index)
+{
+ uint32_t offset = memory_region->receivers[receiver_index]
+ .composite_memory_region_offset;
+
+ if (offset == 0) {
+ return NULL;
+ }
+
+ return (struct ffa_composite_memory_region *)((uint8_t *)memory_region +
+ offset);
+}
+
+static inline uint32_t ffa_mem_relinquish_init(
+ struct ffa_mem_relinquish *relinquish_request,
+ ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
+ ffa_id_t sender)
+{
+ relinquish_request->handle = handle;
+ relinquish_request->flags = flags;
+ relinquish_request->endpoint_count = 1;
+ relinquish_request->endpoints[0] = sender;
+ return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_id_t);
+}
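+
+/*
+ * Usage sketch: relinquish a previously retrieved memory region. 'mb_tx'
+ * stands for the endpoint's TX buffer and, like 'handle' and 'own_id', is an
+ * assumption of this example:
+ *
+ *   ffa_mem_relinquish_init((struct ffa_mem_relinquish *)mb_tx,
+ *                           handle, 0, own_id);
+ *   struct ffa_value ret = ffa_mem_relinquish();
+ */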
+
+uint32_t ffa_memory_retrieve_request_init(
+ struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
+ ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability);
+
+uint32_t ffa_memory_region_init(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, ffa_id_t receiver,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t *fragment_length);
+
+static inline ffa_id_t ffa_dir_msg_dest(struct ffa_value val)
+{
+	return (ffa_id_t)val.arg1 & U(0xFFFF);
+}
+
+static inline ffa_id_t ffa_dir_msg_source(struct ffa_value val)
+{
+	return (ffa_id_t)(val.arg1 >> 16U);
+}
+
+struct ffa_value ffa_msg_send_direct_req64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4);
+
+struct ffa_value ffa_msg_send_direct_req32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4);
+
+struct ffa_value ffa_msg_send_direct_resp64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4);
+
+struct ffa_value ffa_msg_send_direct_resp32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4);
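+
+/*
+ * Usage sketch: minimal message loop of a partition that answers every
+ * direct request with a direct response echoing the first argument
+ * (illustrative):
+ *
+ *   struct ffa_value ret = ffa_msg_wait();
+ *
+ *   while (ffa_func_id(ret) == FFA_MSG_SEND_DIRECT_REQ_SMC64) {
+ *           ret = ffa_msg_send_direct_resp64(ffa_dir_msg_dest(ret),
+ *                                            ffa_dir_msg_source(ret),
+ *                                            ret.arg3, 0, 0, 0, 0);
+ *   }
+ */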
+
+struct ffa_value ffa_run(uint32_t dest_id, uint32_t vcpu_id);
+struct ffa_value ffa_version(uint32_t input_version);
+struct ffa_value ffa_id_get(void);
+struct ffa_value ffa_spm_id_get(void);
+struct ffa_value ffa_msg_wait(void);
+struct ffa_value ffa_error(int32_t error_code);
+struct ffa_value ffa_features(uint32_t feature);
+struct ffa_value ffa_features_with_input_property(uint32_t feature,
+ uint32_t param);
+struct ffa_value ffa_partition_info_get(const struct ffa_uuid uuid);
+struct ffa_value ffa_rx_release(void);
+struct ffa_value ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);
+struct ffa_value ffa_rxtx_unmap(void);
+struct ffa_value ffa_mem_donate(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_lend(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_share(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_relinquish(void);
+struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags);
+struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count);
+struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id);
+struct ffa_value ffa_notification_bind(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t notifications);
+struct ffa_value ffa_notification_unbind(ffa_id_t sender, ffa_id_t receiver,
+ ffa_notification_bitmap_t notifications);
+struct ffa_value ffa_notification_set(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap);
+struct ffa_value ffa_notification_get(ffa_id_t receiver, uint32_t vcpu_id,
+ uint32_t flags);
+struct ffa_value ffa_notification_info_get(void);
+
+struct ffa_value ffa_console_log(const char* message, size_t char_count);
+struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
+ const uint16_t start_index,
+ const uint16_t tag);
+#endif /* __ASSEMBLY__ */
+
+#endif /* FFA_HELPERS_H */
diff --git a/spm/scmi/include/ffa_svc.h b/spm/scmi/include/ffa_svc.h
new file mode 100644
index 0000000..bf535ea
--- /dev/null
+++ b/spm/scmi/include/ffa_svc.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FFA_SVC_H
+#define FFA_SVC_H
+
+#include <lib/utils_def.h>
+#include <smccc.h>
+#include <uuid.h>
+
+/* FFA error codes. */
+#define FFA_ERROR_NOT_SUPPORTED -1
+#define FFA_ERROR_INVALID_PARAMETER -2
+#define FFA_ERROR_NO_MEMORY -3
+#define FFA_ERROR_BUSY -4
+#define FFA_ERROR_INTERRUPTED -5
+#define FFA_ERROR_DENIED -6
+#define FFA_ERROR_RETRY -7
+#define FFA_ERROR_ABORTED -8
+#define FFA_ERROR_NO_DATA -9
+
+/* The macros below are used to identify FFA calls from the SMC function ID */
+#define FFA_FNUM_MIN_VALUE U(0x60)
+#define FFA_FNUM_MAX_VALUE	U(0x8B)
+#define is_ffa_fid(fid) __extension__ ({ \
+ __typeof__(fid) _fid = (fid); \
+ ((GET_SMC_NUM(_fid) >= FFA_FNUM_MIN_VALUE) && \
+ (GET_SMC_NUM(_fid) <= FFA_FNUM_MAX_VALUE)); })
+
+/* FFA_VERSION helpers */
+#define FFA_VERSION_MAJOR U(1)
+#define FFA_VERSION_MAJOR_SHIFT 16
+#define FFA_VERSION_MAJOR_MASK U(0x7FFF)
+#define FFA_VERSION_MINOR U(1)
+#define FFA_VERSION_MINOR_SHIFT 0
+#define FFA_VERSION_MINOR_MASK U(0xFFFF)
+#define FFA_VERSION_BIT31_MASK U(1 << 31)
+
+#define MAKE_FFA_VERSION(major, minor) \
+ ((((major) & FFA_VERSION_MAJOR_MASK) << FFA_VERSION_MAJOR_SHIFT) | \
+ (((minor) & FFA_VERSION_MINOR_MASK) << FFA_VERSION_MINOR_SHIFT))
+#define FFA_VERSION_COMPILED MAKE_FFA_VERSION(FFA_VERSION_MAJOR, \
+ FFA_VERSION_MINOR)
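+
+/*
+ * For instance, MAKE_FFA_VERSION(1, 1) evaluates to
+ * (1 << 16) | 1 = 0x00010001, i.e. FF-A v1.1.
+ */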
+
+/* FFA_MSG_SEND helpers */
+#define FFA_MSG_SEND_ATTRS_BLK_SHIFT U(0)
+#define FFA_MSG_SEND_ATTRS_BLK_MASK U(0x1)
+#define FFA_MSG_SEND_ATTRS_BLK U(0)
+#define FFA_MSG_SEND_ATTRS_BLK_NOT U(1)
+#define FFA_MSG_SEND_ATTRS(blk) \
+ (((blk) & FFA_MSG_SEND_ATTRS_BLK_MASK) \
+ << FFA_MSG_SEND_ATTRS_BLK_SHIFT)
+
+/* Get FFA fastcall std FID from function number */
+#define FFA_FID(smc_cc, func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((smc_cc) << FUNCID_CC_SHIFT) | \
+ (OEN_STD_START << FUNCID_OEN_SHIFT) | \
+ ((func_num) << FUNCID_NUM_SHIFT))
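+
+/*
+ * For instance, with the function numbers defined below,
+ * FFA_FID(SMC_32, FFA_FNUM_VERSION) evaluates to
+ * 0x80000000 | 0x04000000 | 0x63 = 0x84000063, the FFA_VERSION function ID.
+ */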
+
+/* FFA function numbers */
+#define FFA_FNUM_ERROR U(0x60)
+#define FFA_FNUM_SUCCESS U(0x61)
+#define FFA_FNUM_INTERRUPT U(0x62)
+#define FFA_FNUM_VERSION U(0x63)
+#define FFA_FNUM_FEATURES U(0x64)
+#define FFA_FNUM_RX_RELEASE U(0x65)
+#define FFA_FNUM_RXTX_MAP U(0x66)
+#define FFA_FNUM_RXTX_UNMAP U(0x67)
+#define FFA_FNUM_PARTITION_INFO_GET U(0x68)
+#define FFA_FNUM_ID_GET U(0x69)
+#define FFA_FNUM_MSG_POLL U(0x6A) /* Legacy FF-A v1.0 */
+#define FFA_FNUM_MSG_WAIT U(0x6B)
+#define FFA_FNUM_MSG_YIELD U(0x6C)
+#define FFA_FNUM_RUN U(0x6D)
+#define FFA_FNUM_MSG_SEND U(0x6E) /* Legacy FF-A v1.0 */
+#define FFA_FNUM_MSG_SEND_DIRECT_REQ U(0x6F)
+#define FFA_FNUM_MSG_SEND_DIRECT_RESP U(0x70)
+#define FFA_FNUM_MEM_DONATE U(0x71)
+#define FFA_FNUM_MEM_LEND U(0x72)
+#define FFA_FNUM_MEM_SHARE U(0x73)
+#define FFA_FNUM_MEM_RETRIEVE_REQ U(0x74)
+#define FFA_FNUM_MEM_RETRIEVE_RESP U(0x75)
+#define FFA_FNUM_MEM_RELINQUISH U(0x76)
+#define FFA_FNUM_MEM_RECLAIM U(0x77)
+#define FFA_FNUM_NORMAL_WORLD_RESUME U(0x7C)
+
+/* FF-A v1.1 */
+#define FFA_FNUM_NOTIFICATION_BITMAP_CREATE U(0x7D)
+#define FFA_FNUM_NOTIFICATION_BITMAP_DESTROY U(0x7E)
+#define FFA_FNUM_NOTIFICATION_BIND U(0x7F)
+#define FFA_FNUM_NOTIFICATION_UNBIND U(0x80)
+#define FFA_FNUM_NOTIFICATION_SET U(0x81)
+#define FFA_FNUM_NOTIFICATION_GET U(0x82)
+#define FFA_FNUM_NOTIFICATION_INFO_GET U(0x83)
+#define FFA_FNUM_RX_ACQUIRE U(0x84)
+#define FFA_FNUM_SPM_ID_GET U(0x85)
+#define FFA_FNUM_MSG_SEND2 U(0x86)
+#define FFA_FNUM_SECONDARY_EP_REGISTER U(0x87)
+#define FFA_FNUM_PARTITION_INFO_GET_REGS U(0x8B)
+
+/* Implementation defined function numbers */
+#define FFA_FNUM_CONSOLE_LOG U(0x8A)
+
+/* FFA SMC32 FIDs */
+#define FFA_ERROR FFA_FID(SMC_32, FFA_FNUM_ERROR)
+#define FFA_SUCCESS_SMC32 FFA_FID(SMC_32, FFA_FNUM_SUCCESS)
+#define FFA_INTERRUPT FFA_FID(SMC_32, FFA_FNUM_INTERRUPT)
+#define FFA_VERSION FFA_FID(SMC_32, FFA_FNUM_VERSION)
+#define FFA_FEATURES FFA_FID(SMC_32, FFA_FNUM_FEATURES)
+#define FFA_RX_RELEASE FFA_FID(SMC_32, FFA_FNUM_RX_RELEASE)
+#define FFA_RXTX_MAP_SMC32 FFA_FID(SMC_32, FFA_FNUM_RXTX_MAP)
+#define FFA_RXTX_UNMAP FFA_FID(SMC_32, FFA_FNUM_RXTX_UNMAP)
+#define FFA_PARTITION_INFO_GET FFA_FID(SMC_32, FFA_FNUM_PARTITION_INFO_GET)
+#define FFA_ID_GET FFA_FID(SMC_32, FFA_FNUM_ID_GET)
+#define FFA_MSG_POLL FFA_FID(SMC_32, FFA_FNUM_MSG_POLL)
+#define FFA_MSG_WAIT FFA_FID(SMC_32, FFA_FNUM_MSG_WAIT)
+#define FFA_MSG_YIELD FFA_FID(SMC_32, FFA_FNUM_MSG_YIELD)
+#define FFA_RUN FFA_FID(SMC_32, FFA_FNUM_RUN)
+#define FFA_MSG_SEND FFA_FID(SMC_32, FFA_FNUM_MSG_SEND)
+#define FFA_MSG_SEND_DIRECT_REQ_SMC32 \
+ FFA_FID(SMC_32, FFA_FNUM_MSG_SEND_DIRECT_REQ)
+#define FFA_MSG_SEND_DIRECT_RESP_SMC32 \
+ FFA_FID(SMC_32, FFA_FNUM_MSG_SEND_DIRECT_RESP)
+#define FFA_MEM_DONATE_SMC32 FFA_FID(SMC_32, FFA_FNUM_MEM_DONATE)
+#define FFA_MEM_LEND_SMC32 FFA_FID(SMC_32, FFA_FNUM_MEM_LEND)
+#define FFA_MEM_SHARE_SMC32 FFA_FID(SMC_32, FFA_FNUM_MEM_SHARE)
+#define FFA_MEM_RETRIEVE_REQ_SMC32 \
+ FFA_FID(SMC_32, FFA_FNUM_MEM_RETRIEVE_REQ)
+#define FFA_MEM_RETRIEVE_RESP FFA_FID(SMC_32, FFA_FNUM_MEM_RETRIEVE_RESP)
+#define FFA_MEM_RELINQUISH FFA_FID(SMC_32, FFA_FNUM_MEM_RELINQUISH)
+#define FFA_MEM_RECLAIM FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
+#define FFA_NOTIFICATION_BITMAP_CREATE \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BITMAP_CREATE)
+#define FFA_NOTIFICATION_BITMAP_DESTROY \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BITMAP_DESTROY)
+#define FFA_NOTIFICATION_BIND FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BIND)
+#define FFA_NOTIFICATION_UNBIND FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_UNBIND)
+#define FFA_NOTIFICATION_SET FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_SET)
+#define FFA_NOTIFICATION_GET FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_GET)
+#define FFA_NOTIFICATION_INFO_GET \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_INFO_GET)
+#define FFA_SPM_ID_GET FFA_FID(SMC_32, FFA_FNUM_SPM_ID_GET)
+
+/* Implementation defined SMC32 FIDs */
+#define FFA_CONSOLE_LOG_SMC32 FFA_FID(SMC_32, FFA_FNUM_CONSOLE_LOG)
+
+/* FFA SMC64 FIDs */
+#define FFA_SUCCESS_SMC64 FFA_FID(SMC_64, FFA_FNUM_SUCCESS)
+#define FFA_RXTX_MAP_SMC64 FFA_FID(SMC_64, FFA_FNUM_RXTX_MAP)
+#define FFA_MSG_SEND_DIRECT_REQ_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_MSG_SEND_DIRECT_REQ)
+#define FFA_MSG_SEND_DIRECT_RESP_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_MSG_SEND_DIRECT_RESP)
+#define FFA_MEM_DONATE_SMC64 FFA_FID(SMC_64, FFA_FNUM_MEM_DONATE)
+#define FFA_MEM_LEND_SMC64 FFA_FID(SMC_64, FFA_FNUM_MEM_LEND)
+#define FFA_MEM_SHARE_SMC64 FFA_FID(SMC_64, FFA_FNUM_MEM_SHARE)
+#define FFA_MEM_RETRIEVE_REQ_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_MEM_RETRIEVE_REQ)
+#define FFA_SECONDARY_EP_REGISTER_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_SECONDARY_EP_REGISTER)
+#define FFA_NOTIFICATION_INFO_GET_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_NOTIFICATION_INFO_GET)
+
+#define FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT (UINT32_C(1) << 1)
+#define FFA_PARTITION_INFO_GET_REGS_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_PARTITION_INFO_GET_REGS)
+
+/* Implementation defined SMC64 FIDs */
+#define FFA_CONSOLE_LOG_SMC64	FFA_FID(SMC_64, FFA_FNUM_CONSOLE_LOG)
+
+/*
+ * Reserve a special value for traffic targeted to the Hypervisor or SPM.
+ */
+#define FFA_TARGET_INFO_MBZ U(0x0)
+
+/*
+ * Reserve a special value for MBZ parameters.
+ */
+#define FFA_PARAM_MBZ U(0x0)
+
+#endif /* FFA_SVC_H */
diff --git a/spm/scmi/include/platform.h b/spm/scmi/include/platform.h
new file mode 100644
index 0000000..c8b785c
--- /dev/null
+++ b/spm/scmi/include/platform.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_H__
+#define __PLATFORM_H__
+
+#include <stdint.h>
+#include <arch_helpers.h>
+#include <timer.h>
+#include <xlat_tables_v2.h>
+
+#define PLAT_PSCI_DUMMY_STATE_ID 0xF
+
+#define PWR_STATE_INIT_INDEX (-1)
+
+#define INIT_PWR_LEVEL_INDEX(array_name) \
+ do { \
+ unsigned int var; \
+ assert(ARRAY_SIZE(array_name) == (PLAT_MAX_PWR_LEVEL + 1)); \
+ for (var = 0; var <= PLAT_MAX_PWR_LEVEL; var++) \
+ array_name[var] = PWR_STATE_INIT_INDEX; \
+ } while (0)
+
+/*
+ * The platform structure to represent the valid local power state
+ * properties for a particular affinity level. The platform needs to
+ * export the array of valid local low power states for each affinity level
+ * it supports, which can be queried by TFTF tests to construct the required
+ * composite power state.
+ *
+ * TODO: Currently the power levels are identity-mapped to affinity levels in
+ * TFTF; this needs to be decoupled.
+ */
+typedef struct plat_state_prop {
+ /*
+	 * This field has a value that increases with the suspend depth:
+	 * the deeper the suspend state, the higher the value.
+ */
+ unsigned int suspend_depth;
+ /* The local state ID for the idle state at this level. */
+ unsigned int state_ID;
+	/* Flag which indicates whether this is a retention or power-down state */
+ unsigned int is_pwrdown;
+} plat_state_prop_t;
+
+void tftf_plat_arch_setup(void);
+void tftf_early_platform_setup(void);
+void tftf_platform_setup(void);
+
+void tftf_plat_enable_mmu(void);
+void tftf_plat_configure_mmu(void);
+
+void tftf_platform_end(void);
+void tftf_platform_watchdog_set(void);
+void tftf_platform_watchdog_reset(void);
+
+/* Helper that returns a linear core ID from an MPID */
+unsigned int platform_get_core_pos(u_register_t mpid);
+
+/* Crash console functions */
+int plat_crash_console_init(void);
+int plat_crash_console_putc(int c);
+int plat_crash_console_flush(void);
+
+/* Gets a handle for the initialised IO entity */
+void plat_get_nvm_handle(uintptr_t *handle);
+
+/*
+ * Returns the platform topology description array. The size of this
+ * array should be PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1.
+ */
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void);
+
+/*
+ * Function to query the MPIDR of a CPU identified by 'core_pos', which is
+ * the number returned by the platform_get_core_pos() API.
+ * In case the CPU is absent, then this API returns INVALID_MPID. This
+ * function will be queried only during topology setup in TFTF and thereafter
+ * the internal node data will be used to get the MPIDR corresponding
+ * to the 'core_pos'.
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos);
+
+/*
+ * Get the state property array for all the valid states from platform for
+ * a specified 'level'. The array is expected to be NULL terminated after the
+ * last entry.
+ */
+const plat_state_prop_t *plat_get_state_prop(unsigned int level);
+
+/*
+ * Initialises state info data structures for generating various combinations
+ * of state IDs. It also calls tftf_detect_pstate_format(), which detects the
+ * PSTATE format accepted by EL3 firmware.
+ * This function needs to be invoked once during cold boot prior to the
+ * invocation of any PSCI power state helper functions.
+ */
+void tftf_init_pstate_framework(void);
+
+/*
+ * This function is used to generate all possible combinations of composite
+ * state ID's possible for a given set of power states at each level.
+ * Ex: If a system implements 4 levels and each level has 3 local power states.
+ * Then, the total combinations of composite power down states possible are:
+ * 3 * 3 * 3 * 3 = 81
+ *
+ * A single call to set_next_state_id_pointers(), sets pointer to pstate_id_idx
+ * at all levels for a possible combination out of 81.
+ *
+ * A caller can confirm when all combinations are completed by checking if
+ * pwr_lvel_state_indexes for power_level 0 is PWR_STATE_INIT_INDEX
+ */
+void tftf_set_next_state_id_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * This function sets the index for the next state ID of the given power level
+ */
+void tftf_set_next_local_state_id_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * This function sets the index corresponding to the deepest power state at
+ * a given power level.
+ */
+void tftf_set_deepest_pstate_idx(unsigned int power_level,
+ unsigned int pstate_id_idx[]);
+
+/*
+ * Helper function to get the state ID, state type, power level in power_state
+ * parameter of CPU_SUSPEND. The generated values are based on the
+ * pstate_id_idx values of a core.
+ *
+ * This helper expects valid pstate_id_idx entries up to the maximum valid
+ * level, and detects that level by looking for the PWR_STATE_INIT_INDEX
+ * terminator.
+ *
+ * It returns the expected PSCI return value of a suspend request.
+ */
+int tftf_get_pstate_vars(unsigned int *test_power_level,
+ unsigned int *test_suspend_type,
+ unsigned int *suspend_state_id,
+ unsigned int pstate_id_idx[]);
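+
+/*
+ * Usage sketch: iterate over every composite power state and derive the
+ * CPU_SUSPEND power_state fields for each one (illustrative):
+ *
+ *   unsigned int idx[PLAT_MAX_PWR_LEVEL + 1];
+ *   unsigned int lvl, type, state_id;
+ *
+ *   tftf_init_pstate_framework();
+ *   INIT_PWR_LEVEL_INDEX(idx);
+ *
+ *   do {
+ *           tftf_set_next_state_id_idx(PLAT_MAX_PWR_LEVEL, idx);
+ *           if (idx[0] == PWR_STATE_INIT_INDEX)
+ *                   break;
+ *           (void)tftf_get_pstate_vars(&lvl, &type, &state_id, idx);
+ *   } while (1);
+ */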
+
+/*
+ * This function gets the platform specific timer driver information and
+ * initialises platform specific drivers.
+ * Returns 0 on success.
+ */
+int plat_initialise_timer_ops(const plat_timer_t **timer_ops);
+
+struct mem_region {
+ uintptr_t addr;
+ size_t size;
+};
+
+typedef struct mem_region mem_region_t;
+
+/*******************************************************************************
+ * Optional functions. A default, weak implementation of those functions is
+ * provided; it may be overridden by platform code.
+ ******************************************************************************/
+unsigned long platform_get_stack(unsigned long mpidr);
+/*
+ * plat_get_prot_regions: returns a pointer to a
+ * set of regions used to test mem_protect_check.
+ * The number of elements is stored in the variable
+ * pointed to by 'nelem'.
+ */
+const mem_region_t *plat_get_prot_regions(int *nelem);
+
+void tftf_plat_reset(void);
+
+const mmap_region_t *tftf_platform_get_mmap(void);
+
+/*
+ * Return an IO device handle and specification which can be used
+ * to access an image. Use this to enforce platform load policy.
+ */
+int plat_get_image_source(unsigned int image_id,
+ uintptr_t *dev_handle,
+ uintptr_t *image_spec);
+
+void plat_fwu_io_setup(void);
+
+/**
+ * Returns the ID of the currently executing core.
+ */
+static inline uint32_t get_current_core_id(void)
+{
+ return platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+}
+
+#endif /* __PLATFORM_H__ */
diff --git a/spm/scmi/include/smccc.h b/spm/scmi/include/smccc.h
new file mode 100644
index 0000000..283b463
--- /dev/null
+++ b/spm/scmi/include/smccc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMCCC_H__
+#define __SMCCC_H__
+
+#include <utils_def.h>
+
+#define SMCCC_VERSION_MAJOR_SHIFT U(16)
+#define SMCCC_VERSION_MAJOR_MASK U(0x7FFF)
+#define SMCCC_VERSION_MINOR_SHIFT U(0)
+#define SMCCC_VERSION_MINOR_MASK U(0xFFFF)
+#define MAKE_SMCCC_VERSION(_major, _minor) \
+ ((((uint32_t)(_major) & SMCCC_VERSION_MAJOR_MASK) << \
+ SMCCC_VERSION_MAJOR_SHIFT) \
+ | (((uint32_t)(_minor) & SMCCC_VERSION_MINOR_MASK) << \
+ SMCCC_VERSION_MINOR_SHIFT))
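+
+/* For example, MAKE_SMCCC_VERSION(1, 2) evaluates to 0x10002 (SMCCC v1.2). */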
+
+#define SMC_UNKNOWN -1
+#define SMC_OK 0
+
+/* Return codes for Arm Architecture Service SMC calls */
+#define SMC_ARCH_CALL_SUCCESS 0
+#define SMC_ARCH_CALL_NOT_SUPPORTED -1
+#define SMC_ARCH_CALL_NOT_REQUIRED -2
+#define SMC_ARCH_CALL_INVAL_PARAM -3
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
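+
+/*
+ * Illustrative sketch (not part of this patch): a function id can be built
+ * from the fields above. For instance, a fast SMC32 call:
+ *
+ *   #define MAKE_SMC_FID(type, cc, oen, num)                        \
+ *       ((((type) & FUNCID_TYPE_MASK) << FUNCID_TYPE_SHIFT) |       \
+ *        (((cc) & FUNCID_CC_MASK) << FUNCID_CC_SHIFT) |             \
+ *        (((oen) & FUNCID_OEN_MASK) << FUNCID_OEN_SHIFT) |          \
+ *        (((num) & FUNCID_NUM_MASK) << FUNCID_NUM_SHIFT))
+ *
+ * MAKE_SMC_FID(SMC_TYPE_FAST, SMC_32, OEN_STD_START, 0xff00) then yields
+ * 0x8400ff00, matching SMC_STD_SVC_CALL_COUNT in std_svc.h.
+ */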
+
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+/*******************************************************************************
+ * Argument definitions passed to SMC call
+ ******************************************************************************/
+#define SMC_GET_SOC_VERSION 0
+#define SMC_GET_SOC_REVISION 1
+
+#endif /* __SMCCC_H__ */
diff --git a/spm/scmi/include/sp805.h b/spm/scmi/include/sp805.h
new file mode 100644
index 0000000..75bcc12
--- /dev/null
+++ b/spm/scmi/include/sp805.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SP805_H__
+#define __SP805_H__
+
+/* SP805 register offset */
+#define SP805_WDOG_LOAD_OFF 0x000
+#define SP805_WDOG_VALUE_0FF 0x004
+#define SP805_WDOG_CTRL_OFF 0x008
+#define SP805_WDOG_INT_CLR_OFF 0x00c
+#define SP805_WDOG_RIS_OFF 0x010
+#define SP805_WDOG_MIS_OFF 0x014
+#define SP805_WDOG_LOCK_OFF 0xc00
+#define SP805_WDOG_ITCR_OFF 0xf00
+#define SP805_WDOG_ITOP_OFF 0xf04
+#define SP805_WDOG_PERIPH_ID_OFF 0xfe0
+#define SP805_WDOG_PCELL_ID_OFF 0xff0
+
+/*
+ * Magic word to unlock access to all other watchdog registers. Writing any
+ * other value locks them.
+ */
+#define SP805_WDOG_UNLOCK_ACCESS 0x1ACCE551
+
+/* Register field definitions */
+#define SP805_WDOG_CTRL_MASK 0x03
+#define SP805_WDOG_CTRL_RESEN (1 << 1)
+#define SP805_WDOG_CTRL_INTEN (1 << 0)
+#define SP805_WDOG_RIS_WDOGRIS (1 << 0)
+#define SP805_WDOG_RIS_MASK 0x1
+#define SP805_WDOG_MIS_WDOGMIS (1 << 0)
+#define SP805_WDOG_MIS_MASK 0x1
+#define SP805_WDOG_ITCR_MASK 0x1
+#define SP805_WDOG_ITOP_MASK 0x3
+#define SP805_WDOG_PART_NUM_SHIFT 0
+#define SP805_WDOG_PART_NUM_MASK 0xfff
+#define SP805_WDOG_DESIGNER_ID_SHIFT 12
+#define SP805_WDOG_DESIGNER_ID_MASK 0xff
+#define SP805_WDOG_REV_SHIFT 20
+#define SP805_WDOG_REV_MASK 0xf
+#define SP805_WDOG_CFG_SHIFT 24
+#define SP805_WDOG_CFG_MASK 0xff
+#define SP805_WDOG_PCELL_ID_SHIFT 0
+#define SP805_WDOG_PCELL_ID_MASK 0xff
+
+#define ARM_SP805_TWDG_CLK_HZ 32768
+
+/* Public APIs for non-trusted watchdog module. */
+void sp805_wdog_start(unsigned int wdog_cycles);
+void sp805_wdog_stop(void);
+void sp805_wdog_refresh(void);
+
+/* Public APIs for trusted watchdog module. */
+void sp805_twdog_start(unsigned int wdog_cycles);
+void sp805_twdog_stop(void);
+void sp805_twdog_refresh(void);
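+
+/*
+ * Illustrative use (not part of this patch): with the 32.768 kHz watchdog
+ * clock above, sp805_twdog_start(ARM_SP805_TWDG_CLK_HZ) arms the trusted
+ * watchdog with a roughly one second timeout; sp805_twdog_refresh() restarts
+ * the count and sp805_twdog_stop() disables it again.
+ */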
+
+#endif /* __SP805_H__ */
+
diff --git a/spm/scmi/include/spm_common.h b/spm/scmi/include/spm_common.h
new file mode 100644
index 0000000..3549518
--- /dev/null
+++ b/spm/scmi/include/spm_common.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_COMMON_H
+#define SPM_COMMON_H
+
+#include <platform.h>
+
+#include <stdint.h>
+#include <string.h>
+
+#include <ffa_helpers.h>
+
+#include <lib/extensions/sve.h>
+
+/* Hypervisor ID at physical FFA instance */
+#define HYP_ID (0)
+/* SPMC ID */
+#define SPMC_ID U(0x8000)
+
+/* ID for the first Secure Partition. */
+#define SPM_VM_ID_FIRST SP_ID(1)
+
+/* INTID for the managed exit virtual interrupt. */
+#define MANAGED_EXIT_INTERRUPT_ID U(4)
+
+/* INTID for the notification pending interrupt. */
+#define NOTIFICATION_PENDING_INTERRUPT_INTID 5
+
+/** IRQ/FIQ pin used for signaling a virtual interrupt. */
+enum interrupt_pin {
+ INTERRUPT_TYPE_IRQ,
+ INTERRUPT_TYPE_FIQ,
+};
+
+/*
+ * Bit 15 of an FF-A ID indicates whether the partition executes in the
+ * normal world, as a Virtual Machine (VM), or in the secure world, as a
+ * Secure Partition (SP).
+ *
+ * If bit 15 is set, the partition is an SP; if bit 15 is clear, the
+ * partition is a VM.
+ */
+#define SP_ID_MASK U(1 << 15)
+#define SP_ID(x) ((x) | SP_ID_MASK)
+#define VM_ID(x) (x & ~SP_ID_MASK)
+#define IS_SP_ID(x) ((x & SP_ID_MASK) != 0U)
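+
+/*
+ * For example, SP_ID(1) evaluates to 0x8001 and IS_SP_ID(0x8001) is true,
+ * while VM_ID(0x8001) recovers the plain partition number 1.
+ */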
+
+#define NULL_UUID (const struct ffa_uuid) { .uuid = {0} }
+
+struct ffa_features_test {
+ const char *test_name;
+ unsigned int feature;
+ unsigned int expected_ret;
+ unsigned int param;
+ unsigned int version_added;
+};
+
+struct mailbox_buffers {
+ void *recv;
+ void *send;
+};
+
+#define CONFIGURE_MAILBOX(mb_name, buffers_size) \
+ do { \
+ /* Declare RX/TX buffers at virtual FF-A instance */ \
+ static struct { \
+ uint8_t rx[buffers_size]; \
+ uint8_t tx[buffers_size]; \
+ } __aligned(PAGE_SIZE) mb_buffers; \
+ mb_name.recv = (void *)mb_buffers.rx; \
+ mb_name.send = (void *)mb_buffers.tx; \
+ } while (false)
+
+#define CONFIGURE_AND_MAP_MAILBOX(mb_name, buffers_size, smc_ret) \
+ do { \
+ CONFIGURE_MAILBOX(mb_name, buffers_size); \
+ smc_ret = ffa_rxtx_map( \
+ (uintptr_t)mb_name.send, \
+ (uintptr_t)mb_name.recv, \
+ buffers_size / PAGE_SIZE \
+ ); \
+ } while (false)
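+
+/*
+ * Typical usage (illustrative sketch), mapping a one-page RX/TX pair and
+ * checking the result with the helper declared below:
+ *
+ *   static struct mailbox_buffers mb;
+ *   struct ffa_value ret;
+ *
+ *   CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ *   if (is_ffa_call_error(ret)) {
+ *           // the RX/TX buffers could not be mapped
+ *   }
+ */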
+
+/**
+ * Helpers to evaluate returns of FF-A calls.
+ */
+bool is_ffa_call_error(struct ffa_value val);
+bool is_expected_ffa_error(struct ffa_value ret, int32_t error_code);
+bool is_ffa_direct_response(struct ffa_value ret);
+bool is_expected_ffa_return(struct ffa_value ret, uint32_t func_id);
+bool is_expected_cactus_response(struct ffa_value ret, uint32_t expected_resp,
+ uint32_t arg);
+void dump_ffa_value(struct ffa_value ret);
+
+/*
+ * Fills SIMD/SVE registers with the content of the container v.
+ * Number of vectors is assumed to be SIMD/SVE_NUM_VECTORS.
+ */
+void fill_sve_vector_regs(const sve_vector_t v[SVE_NUM_VECTORS]);
+
+/*
+ * Reads contents of SIMD/SVE registers into the provided container v.
+ * Number of vectors is assumed to be SIMD/SVE_NUM_VECTORS.
+ */
+void read_sve_vector_regs(sve_vector_t v[SVE_NUM_VECTORS]);
+
+bool check_spmc_execution_level(void);
+
+unsigned int get_ffa_feature_test_target(const struct ffa_features_test **test_target);
+
+/**
+ * Helper to conduct a memory retrieve. This is to be called by the receiver
+ * of a memory share operation.
+ */
+bool memory_retrieve(struct mailbox_buffers *mb,
+ struct ffa_memory_region **retrieved, uint64_t handle,
+ ffa_id_t sender, ffa_id_t receiver,
+ ffa_memory_region_flags_t flags);
+
+/**
+ * Helper to conduct a memory relinquish. The caller is usually the receiver,
+ * once it is done with the shared memory identified by the 'handle'.
+ */
+bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
+ ffa_id_t id);
+
+ffa_memory_handle_t memory_send(
+ struct ffa_memory_region *memory_region, uint32_t mem_func,
+ uint32_t fragment_length, uint32_t total_length, struct ffa_value *ret);
+
+ffa_memory_handle_t memory_init_and_send(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, ffa_id_t receiver,
+ const struct ffa_memory_region_constituent* constituents,
+ uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret);
+
+bool ffa_partition_info_helper(struct mailbox_buffers *mb,
+ const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size);
+bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
+bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
+
+bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size);
+#endif /* SPM_COMMON_H */
diff --git a/spm/scmi/include/std_svc.h b/spm/scmi/include/std_svc.h
new file mode 100644
index 0000000..75ca4e6
--- /dev/null
+++ b/spm/scmi/include/std_svc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Definitions related to the Standard Service as per the SMC Calling Convention
+ *
+ * Although PSCI calls are part of the Standard Service call range, PSCI-related
+ * definitions are not in this header file but in psci.h instead.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+#define SMC_STD_SVC_CALL_COUNT 0x8400ff00
+#define SMC_STD_SVC_UID 0x8400ff01
+/* 0x8400ff02 is reserved */
+#define SMC_STD_SVC_REVISION 0x8400ff03
+
+/* Standard Service Calls revision numbers */
+#define STD_SVC_REVISION_MAJOR 0x0
+#define STD_SVC_REVISION_MINOR 0x1
+
+#endif /* __STD_SVC_H__ */
diff --git a/spm/scmi/include/tftf.h b/spm/scmi/include/tftf.h
new file mode 100644
index 0000000..43f1e7e
--- /dev/null
+++ b/spm/scmi/include/tftf.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TFTF_H__
+#define __TFTF_H__
+
+#ifndef __ASSEMBLY__
+#include <status.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+#define TFTF_WELCOME_STR "Booting trusted firmware test framework"
+
+/* Maximum size of test output (in bytes) */
+#define TESTCASE_OUTPUT_MAX_SIZE 512
+
+/* Size of the build message used to distinguish different TFTF binaries */
+#define BUILD_MESSAGE_SIZE 0x20
+
+extern const char build_message[];
+
+typedef test_result_t (*test_function_t)(void);
+
+typedef struct {
+ /* Test result (success, crashed, failed, ...). */
+ test_result_t result;
+ unsigned long long duration;
+ /*
+ * Offset of test output string from TEST_NVM_RESULT_BUFFER_OFFSET.
+ * Only relevant if test has an output, i.e. if \a output_size is not
+ * zero.
+ */
+ unsigned output_offset;
+ /* Size of test output string, excluding final \0. */
+ unsigned output_size;
+} TESTCASE_RESULT;
+
+typedef struct {
+ unsigned index;
+ const char *name;
+ const char *description;
+ test_function_t test;
+} test_case_t;
+
+typedef struct {
+ const char *name;
+ const char *description;
+ const test_case_t *testcases;
+} test_suite_t;
+
+/*
+ * Reference to a specific test.
+ */
+typedef struct {
+ unsigned int testsuite_idx;
+ unsigned int testcase_idx;
+} test_ref_t;
+
+/*
+ * The progress in the execution of a test.
+ * This is used to implement the following state machine.
+ *
+ * +-> TEST_READY (initial state of the test) <--------------+
+ * | | |
+ * | | Test framework prepares the test environment. |
+ * | | |
+ * | v |
+ * | TEST_IN_PROGRESS |
+ * | | |
+ * | | Hand over to the test function. |
+ * | | If the test wants to reboot the platform ---> TEST_REBOOTING |
+ * | | Test function returns into framework. | |
+ * | | | Reboot |
+ * | | | |
+ * | | +---------+
+ * | v
+ * | TEST_COMPLETE
+ * | |
+ * | | Do some framework management.
+ * | | Move to next test.
+ * +--------+
+ */
+typedef enum {
+ TEST_PROGRESS_MIN = 0,
+ TEST_READY = TEST_PROGRESS_MIN,
+ TEST_IN_PROGRESS,
+ TEST_COMPLETE,
+ TEST_REBOOTING,
+
+ TEST_PROGRESS_MAX,
+} test_progress_t;
+
+#define TEST_PROGRESS_IS_VALID(_progress) \
+ ((_progress >= TEST_PROGRESS_MIN) && (_progress < TEST_PROGRESS_MAX))
+
+/*
+ * The definition of this global variable is generated by the script
+ * 'tftf_generate_test_list' during the build process
+ */
+extern const test_suite_t testsuites[];
+
+extern TESTCASE_RESULT testcase_results[];
+
+/* Set/Get the test to run in NVM */
+STATUS tftf_set_test_to_run(const test_ref_t test_to_run);
+STATUS tftf_get_test_to_run(test_ref_t *test_to_run);
+/* Set/Get the progress of the current test in NVM */
+STATUS tftf_set_test_progress(test_progress_t test_progress);
+STATUS tftf_get_test_progress(test_progress_t *test_progress);
+
+/**
+** Save test result into NVM.
+*/
+STATUS tftf_testcase_set_result(const test_case_t *testcase,
+ test_result_t result,
+ unsigned long long duration);
+/**
+** Get a testcase result from NVM.
+**
+** @param[in] testcase The targeted testcase.
+** @param[out] result Testcase result. Only \a result.result and
+** \a result.duration are of interest for the caller and the 2 other fields
+** should be ignored (they correspond to a location in NVM).
+** @param[out] test_output Buffer to store the test output, if any.
+** \a test_output must be big enough to hold the whole test output.
+** Test output will be \a TESTCASE_OUTPUT_MAX_SIZE bytes maximum.
+*/
+STATUS tftf_testcase_get_result(const test_case_t *testcase, TESTCASE_RESULT *result, char *test_output);
+
+void print_testsuite_start(const test_suite_t *testsuite);
+void print_test_start(const test_case_t *test);
+void print_test_end(const test_case_t *test);
+void print_tests_summary(void);
+
+/*
+ * Exit the TFTF.
+ * This function can be used when a fatal error is encountered or as part of
+ * the normal termination process. It does the necessary cleanups, then puts
+ * the core in a low-power state.
+ */
+void __dead2 tftf_exit(void);
+
+void tftf_arch_setup(void);
+
+/*
+ * This function detects the power state format used by PSCI, which can be
+ * either the extended or the original format. For the original format, the
+ * State-ID can either be NULL or follow the recommended encoding.
+ * This function needs to be invoked once during cold boot, prior to the
+ * invocation of any PSCI power state helper functions.
+ */
+void tftf_detect_psci_pstate_format(void);
+
+/*
+ * Run the next test on the calling CPU.
+ * Once the test is complete, if the calling CPU is the last one to exit the
+ * test then do the necessary bookkeeping, report the overall test result and
+ * move on to the next test. Otherwise, shut down the calling CPU.
+ *
+ * This function never returns.
+ */
+void __dead2 run_tests(void);
+
+/* Entry point for a CPU that has just been powered up */
+void tftf_hotplug_entry(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __TFTF_H__ */
diff --git a/spm/scmi/libc/aarch64/setjmp.S b/spm/scmi/libc/aarch64/setjmp.S
new file mode 100644
index 0000000..9d9eb49
--- /dev/null
+++ b/spm/scmi/libc/aarch64/setjmp.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <setjmp.h>
+
+ .globl setjmp
+ .globl longjmp
+
+/*
+ * int setjmp(jmp_buf env);
+ */
+func setjmp
+ mov x7, sp
+
+ stp x19, x20, [x0, #JMP_CTX_X19]
+ stp x21, x22, [x0, #JMP_CTX_X21]
+ stp x23, x24, [x0, #JMP_CTX_X23]
+ stp x25, x26, [x0, #JMP_CTX_X25]
+ stp x27, x28, [x0, #JMP_CTX_X27]
+ stp x29, x30, [x0, #JMP_CTX_X29]
+ stp x7, xzr, [x0, #JMP_CTX_SP]
+
+ mov x0, #0
+ ret
+endfunc setjmp
+
+
+/*
+ * void longjmp(jmp_buf env, int val);
+ */
+func longjmp
+ ldp x7, xzr, [x0, #JMP_CTX_SP]
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Since we're unwinding the stack, assert that the stack being reset to
+ * is shallower.
+ */
+ mov x19, sp
+ cmp x7, x19
+ ASM_ASSERT(ge)
+#endif
+
+ ldp x19, x20, [x0, #JMP_CTX_X19]
+ ldp x21, x22, [x0, #JMP_CTX_X21]
+ ldp x23, x24, [x0, #JMP_CTX_X23]
+ ldp x25, x26, [x0, #JMP_CTX_X25]
+ ldp x27, x28, [x0, #JMP_CTX_X27]
+ ldp x29, x30, [x0, #JMP_CTX_X29]
+
+ mov sp, x7
+
+ ands x0, x1, x1 /* Move val to x0 and set flags */
+ cinc x0, x0, eq /* If val is 0, return 1 */
+ ret
+endfunc longjmp
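+
+/*
+ * Usage sketch from C (illustrative only):
+ *
+ *   jmp_buf env;
+ *
+ *   if (setjmp(env) == 0) {
+ *           // direct path: setjmp returned 0
+ *           longjmp(env, 1); // unwind: setjmp appears to return 1
+ *   }
+ *
+ * Note that longjmp(env, 0) makes setjmp return 1, as implemented by the
+ * cinc above.
+ */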
diff --git a/spm/scmi/libc/abort.c b/spm/scmi/libc/abort.c
new file mode 100644
index 0000000..432b1d0
--- /dev/null
+++ b/spm/scmi/libc/abort.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <common/debug.h>
+
+void abort(void)
+{
+ ERROR("ABORT\n");
+ panic();
+}
diff --git a/spm/scmi/libc/assert.c b/spm/scmi/libc/assert.c
new file mode 100644
index 0000000..dbf8507
--- /dev/null
+++ b/spm/scmi/libc/assert.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cdefs.h>
+#include <stdio.h>
+
+#include <common/debug.h>
+
+void __assert(const char *file, unsigned int line, const char *assertion)
+{
+ printf("ASSERT: %s:%d:%s\n", file, line, assertion);
+ panic();
+}
diff --git a/spm/scmi/libc/exit.c b/spm/scmi/libc/exit.c
new file mode 100644
index 0000000..f4ffe27
--- /dev/null
+++ b/spm/scmi/libc/exit.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+static void (*exitfun)(void);
+
+void exit(int status)
+{
+ if (exitfun != NULL)
+ (*exitfun)();
+ for (;;)
+ ;
+}
+
+int atexit(void (*fun)(void))
+{
+ if (exitfun != NULL)
+ return -1;
+ exitfun = fun;
+
+ return 0;
+}
diff --git a/spm/scmi/libc/libc.mk b/spm/scmi/libc/libc.mk
new file mode 100644
index 0000000..729b91c
--- /dev/null
+++ b/spm/scmi/libc/libc.mk
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+LIBC_SRCS := $(addprefix lib/libc/, \
+ abort.c \
+ assert.c \
+ exit.c \
+ memchr.c \
+ memcmp.c \
+ memcpy.c \
+ memmove.c \
+ memset.c \
+ printf.c \
+ putchar.c \
+ puts.c \
+ rand.c \
+ snprintf.c \
+ strchr.c \
+ strcmp.c \
+ strlcpy.c \
+ strlen.c \
+ strncmp.c \
+ strncpy.c \
+ strnlen.c \
+ strrchr.c)
+
+ifeq (${ARCH},aarch64)
+LIBC_SRCS += $(addprefix lib/libc/aarch64/, \
+ setjmp.S)
+endif
+
+INCLUDES += -Iinclude/lib/libc \
+			-Iinclude/lib/libc/$(ARCH)
diff --git a/spm/scmi/libc/memchr.c b/spm/scmi/libc/memchr.c
new file mode 100644
index 0000000..7bd3a7e
--- /dev/null
+++ b/spm/scmi/libc/memchr.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memchr(const void *src, int c, size_t len)
+{
+ const unsigned char *s = src;
+
+ while (len--) {
+ if (*s == (unsigned char)c)
+ return (void *) s;
+ s++;
+ }
+
+ return NULL;
+}
diff --git a/spm/scmi/libc/memcmp.c b/spm/scmi/libc/memcmp.c
new file mode 100644
index 0000000..a4c798b
--- /dev/null
+++ b/spm/scmi/libc/memcmp.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+int memcmp(const void *s1, const void *s2, size_t len)
+{
+ const unsigned char *s = s1;
+ const unsigned char *d = s2;
+ unsigned char sc;
+ unsigned char dc;
+
+ while (len--) {
+ sc = *s++;
+ dc = *d++;
+ if (sc - dc)
+ return (sc - dc);
+ }
+
+ return 0;
+}
diff --git a/spm/scmi/libc/memcpy.c b/spm/scmi/libc/memcpy.c
new file mode 100644
index 0000000..fc0c9fe
--- /dev/null
+++ b/spm/scmi/libc/memcpy.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ const char *s = src;
+ char *d = dst;
+
+ while (len--)
+ *d++ = *s++;
+
+ return dst;
+}
diff --git a/spm/scmi/libc/memmove.c b/spm/scmi/libc/memmove.c
new file mode 100644
index 0000000..63acf26
--- /dev/null
+++ b/spm/scmi/libc/memmove.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+void *memmove(void *dst, const void *src, size_t len)
+{
+	/*
+	 * The following test makes use of unsigned arithmetic overflow to
+	 * more efficiently test the condition !(src <= dst && dst < src+len).
+	 * When dst is below src, or does not alias the source at all, the
+	 * subtraction wraps around to a value >= len, so a forward copy is
+	 * safe. It also avoids the situation where the more explicit test
+	 * would give incorrect results were the calculation src+len to
+	 * overflow (though that issue is probably moot, as such usage is
+	 * probably undefined behaviour and a bug anyway).
+	 */
+ if ((size_t)dst - (size_t)src >= len) {
+ /* destination not in source data, so can safely use memcpy */
+ return memcpy(dst, src, len);
+ } else {
+ /* copy backwards... */
+ const char *end = dst;
+ const char *s = (const char *)src + len;
+ char *d = (char *)dst + len;
+ while (d != end)
+ *--d = *--s;
+ }
+ return dst;
+}
diff --git a/spm/scmi/libc/memset.c b/spm/scmi/libc/memset.c
new file mode 100644
index 0000000..03aa809
--- /dev/null
+++ b/spm/scmi/libc/memset.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+void *memset(void *dst, int val, size_t count)
+{
+ char *ptr = dst;
+
+ while (count--)
+ *ptr++ = val;
+
+ return dst;
+}
diff --git a/spm/scmi/libc/printf.c b/spm/scmi/libc/printf.c
new file mode 100644
index 0000000..60203fb
--- /dev/null
+++ b/spm/scmi/libc/printf.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+
+#define get_num_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, long int) : \
+ va_arg(_args, int)))
+
+#define get_unum_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, unsigned long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, unsigned long int) : \
+ va_arg(_args, unsigned int)))
+
+static int string_print(const char *str, char padc, int padn)
+{
+ int i = 0, count = 0;
+
+ assert(str != NULL);
+
+ while (str[i] != '\0')
+ i++;
+
+ if (padn > 0) {
+ while (i < padn) {
+ (void)putchar(padc);
+ count++;
+ padn--;
+ }
+ }
+
+ for ( ; *str != '\0'; str++) {
+ (void)putchar(*str);
+ count++;
+ }
+
+ if (padn < 0) {
+ while (i < -padn) {
+ (void)putchar(padc);
+ count++;
+ padn++;
+ }
+ }
+
+ return count;
+}
+
+static int unsigned_num_print(unsigned long long int unum, unsigned int radix,
+ char padc, int padn)
+{
+ /* Just need enough space to store 64 bit decimal integer */
+ char num_buf[20];
+ int i = 0, count = 0;
+ int width;
+ unsigned int rem;
+
+ do {
+ rem = unum % radix;
+ if (rem < 0xa)
+ num_buf[i] = '0' + rem;
+ else
+ num_buf[i] = 'a' + (rem - 0xa);
+ i++;
+ unum /= radix;
+ } while (unum > 0U);
+
+ width = i;
+
+ if (padn > 0) {
+ while (width < padn) {
+ (void)putchar(padc);
+ count++;
+ padn--;
+ }
+ }
+
+ while (--i >= 0) {
+ (void)putchar(num_buf[i]);
+ count++;
+ }
+
+ if (padn < 0) {
+ while (width < -padn) {
+ (void)putchar(padc);
+ count++;
+ padn++;
+ }
+ }
+
+ return count;
+}
+
+/*******************************************************************
+ * Simplified version of printf() with a smaller memory footprint.
+ * The following type specifiers are supported by this printf:
+ * %x - hexadecimal format
+ * %s - string format
+ * %d or %i - signed decimal format
+ * %u - unsigned decimal format
+ * %p - pointer format
+ *
+ * The following length specifiers are supported by this printf:
+ * %l - long int (64-bit on AArch64)
+ * %ll - long long int (64-bit on AArch64)
+ * %z - size_t sized integer formats (64-bit on AArch64)
+ *
+ * The following padding specifiers are supported by this printf:
+ * %0NN - Left-pad the number with 0s (NN is a decimal number)
+ * %NN - Left-pad the number or string with spaces (NN is a decimal number)
+ * %-NN - Right-pad the number or string with spaces (NN is a decimal number)
+ *
+ * The function exits on any format specifier other than valid
+ * combinations of the above specifiers.
+ *******************************************************************/
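+/*
+ * For example (illustrative): printf("[%5d|%-5d|%05d]", 42, 42, 42)
+ * prints "[   42|42   |00042]".
+ */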
+int vprintf(const char *fmt, va_list args)
+{
+ int l_count;
+ int left;
+ long long int num;
+ unsigned long long int unum;
+ char *str;
+ char padc; /* Padding character */
+ int padn; /* Number of characters to pad */
+ int count = 0; /* Number of printed characters */
+
+ while (*fmt != '\0') {
+ l_count = 0;
+ left = 0;
+ padc = '\0';
+ padn = 0;
+
+ if (*fmt == '%') {
+ fmt++;
+ /* Check the format specifier */
+loop:
+ switch (*fmt) {
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ padc = ' ';
+ for (padn = 0; *fmt >= '0' && *fmt <= '9'; fmt++)
+ padn = (padn * 10) + (*fmt - '0');
+ if (left)
+ padn = -padn;
+ goto loop;
+ case '-':
+ left = 1;
+ fmt++;
+ goto loop;
+ case 'i': /* Fall through to next one */
+ case 'd':
+ num = get_num_va_args(args, l_count);
+ if (num < 0) {
+ (void)putchar('-');
+ unum = (unsigned long long int)-num;
+ padn--;
+ } else
+ unum = (unsigned long long int)num;
+
+ count += unsigned_num_print(unum, 10,
+ padc, padn);
+ break;
+ case 's':
+ str = va_arg(args, char *);
+ count += string_print(str, padc, padn);
+ break;
+ case 'p':
+ unum = (uintptr_t)va_arg(args, void *);
+ if (unum > 0U) {
+ count += string_print("0x", padc, 0);
+ padn -= 2;
+ }
+
+ count += unsigned_num_print(unum, 16,
+ padc, padn);
+ break;
+ case 'x':
+ unum = get_unum_va_args(args, l_count);
+ count += unsigned_num_print(unum, 16,
+ padc, padn);
+ break;
+ case 'z':
+ if (sizeof(size_t) == 8U)
+ l_count = 2;
+
+ fmt++;
+ goto loop;
+ case 'l':
+ l_count++;
+ fmt++;
+ goto loop;
+ case 'u':
+ unum = get_unum_va_args(args, l_count);
+ count += unsigned_num_print(unum, 10,
+ padc, padn);
+ break;
+ case '0':
+ padc = '0';
+ padn = 0;
+ fmt++;
+
+ for (;;) {
+ char ch = *fmt;
+ if ((ch < '0') || (ch > '9')) {
+ goto loop;
+ }
+ padn = (padn * 10) + (ch - '0');
+ fmt++;
+ }
+ assert(0); /* Unreachable */
+ default:
+ /* Exit on any other format specifier */
+ return -1;
+ }
+ fmt++;
+ continue;
+ }
+ (void)putchar(*fmt);
+ fmt++;
+ count++;
+ }
+
+ return count;
+}
+
+int printf(const char *fmt, ...)
+{
+ int count;
+ va_list va;
+
+ va_start(va, fmt);
+ count = vprintf(fmt, va);
+ va_end(va);
+
+ return count;
+}
diff --git a/spm/scmi/libc/putchar.c b/spm/scmi/libc/putchar.c
new file mode 100644
index 0000000..037e28a
--- /dev/null
+++ b/spm/scmi/libc/putchar.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+
+#include <drivers/console.h>
+
+int putchar(int c)
+{
+ int res;
+ if (console_putc((unsigned char)c) >= 0)
+ res = c;
+ else
+ res = EOF;
+
+ return res;
+}
diff --git a/spm/scmi/libc/puts.c b/spm/scmi/libc/puts.c
new file mode 100644
index 0000000..2a0ca11
--- /dev/null
+++ b/spm/scmi/libc/puts.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+
+int puts(const char *s)
+{
+ int count = 0;
+
+ while (*s != '\0') {
+ if (putchar(*s) == EOF)
+ return EOF;
+ s++;
+ count++;
+ }
+
+ if (putchar('\n') == EOF)
+ return EOF;
+
+ return count + 1;
+}
diff --git a/spm/scmi/libc/rand.c b/spm/scmi/libc/rand.c
new file mode 100644
index 0000000..59cb796
--- /dev/null
+++ b/spm/scmi/libc/rand.c
@@ -0,0 +1,65 @@
+/*-
+ * Portions Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+//__FBSDID("$FreeBSD: src/lib/libc/stdlib/rand.c,v 1.17.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $");
+#include <stdlib.h>
+
+static unsigned int next = 1;
+
+/** Compute a pseudo-random number.
+ *
+ * Compute x = (7^5 * x) mod (2^31 - 1)
+ * without overflowing 31 bits:
+ * (2^31 - 1) = 127773 * (7^5) + 2836
+ * From "Random number generators: good ones are hard to find",
+ * Park and Miller, Communications of the ACM, vol. 31, no. 10,
+ * October 1988, p. 1195.
+**/
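+/*
+ * Schrage's decomposition (background note): with m = 2^31 - 1,
+ * q = m / 16807 = 127773 and r = m % 16807 = 2836, the product
+ * 16807 * x mod m can be computed without overflow as
+ * 16807 * (x % q) - r * (x / q), adding m back whenever the intermediate
+ * result goes negative, which is exactly what the hi/lo arithmetic below
+ * does.
+ */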
+int
+rand()
+{
+ int hi, lo, x;
+
+ /* Can't be initialized with 0, so use another value. */
+ if (next == 0)
+ next = 123459876;
+ hi = next / 127773;
+ lo = next % 127773;
+ x = 16807 * lo - 2836 * hi;
+ if (x < 0)
+ x += 0x7fffffff;
+ return ((next = x) % ((unsigned int)RAND_MAX + 1));
+}
+
+void
+srand(unsigned int seed)
+{
+ next = (unsigned int)seed;
+}
diff --git a/spm/scmi/libc/snprintf.c b/spm/scmi/libc/snprintf.c
new file mode 100644
index 0000000..6ad284f
--- /dev/null
+++ b/spm/scmi/libc/snprintf.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <common/debug.h>
+
+#define get_num_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, long int) : \
+ va_arg(_args, int)))
+
+#define get_unum_va_args(_args, _lcount) \
+ (((_lcount) > 1) ? va_arg(_args, unsigned long long int) : \
+ (((_lcount) == 1) ? va_arg(_args, unsigned long int) : \
+ va_arg(_args, unsigned int)))
+
+static void string_print(char **s, size_t n, size_t *chars_printed,
+ const char *str)
+{
+ while (*str != '\0') {
+ if (*chars_printed < n) {
+ *(*s) = *str;
+ (*s)++;
+ }
+
+ (*chars_printed)++;
+ str++;
+ }
+}
+
+static void unsigned_num_print(char **s, size_t n, size_t *count,
+ unsigned long long int unum, unsigned int radix,
+ char padc, int padn)
+{
+ /* Just need enough space to store 64 bit decimal integer */
+ char num_buf[20];
+ int i = 0;
+ int width;
+ unsigned int rem;
+
+ do {
+ rem = unum % radix;
+ if (rem < 0xa)
+ num_buf[i] = '0' + rem;
+ else
+ num_buf[i] = 'a' + (rem - 0xa);
+ i++;
+ unum /= radix;
+ } while (unum > 0U);
+
+ width = i;
+
+ if (padn > 0) {
+ while (width < padn) {
+ if (*count < n) {
+ *(*s) = padc;
+ (*s)++;
+ }
+ (*count)++;
+ padn--;
+ }
+ }
+
+ while (--i >= 0) {
+ if (*count < n) {
+ *(*s) = num_buf[i];
+ (*s)++;
+ }
+ (*count)++;
+ }
+
+ if (padn < 0) {
+ while (width < -padn) {
+ if (*count < n) {
+ *(*s) = padc;
+ (*s)++;
+ }
+ (*count)++;
+ padn++;
+ }
+ }
+}
+
+/*
+ * Scaled down version of vsnprintf(3).
+ */
+int vsnprintf(char *s, size_t n, const char *fmt, va_list args)
+{
+ int l_count;
+ int left;
+ char *str;
+	long long int num;
+ unsigned long long int unum;
+ char padc; /* Padding character */
+ int padn; /* Number of characters to pad */
+ size_t count = 0U;
+
+ if (n == 0U) {
+ /* There isn't space for anything. */
+ } else if (n == 1U) {
+ /* Buffer is too small to actually write anything else. */
+ *s = '\0';
+ n = 0U;
+ } else {
+ /* Reserve space for the terminator character. */
+ n--;
+ }
+
+ while (*fmt != '\0') {
+ l_count = 0;
+ left = 0;
+ padc = '\0';
+ padn = 0;
+
+ if (*fmt == '%') {
+ fmt++;
+ /* Check the format specifier. */
+loop:
+ switch (*fmt) {
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ padc = ' ';
+ for (padn = 0; *fmt >= '0' && *fmt <= '9'; fmt++)
+ padn = (padn * 10) + (*fmt - '0');
+ if (left)
+ padn = -padn;
+ goto loop;
+ case '-':
+ left = 1;
+ fmt++;
+ goto loop;
+ case 'i':
+ case 'd':
+ num = get_num_va_args(args, l_count);
+
+ if (num < 0) {
+ if (count < n) {
+ *s = '-';
+ s++;
+ }
+ count++;
+
+					unum = (unsigned long long int)-num;
+				} else {
+					unum = (unsigned long long int)num;
+ }
+
+ unsigned_num_print(&s, n, &count, unum, 10,
+ padc, padn);
+ break;
+ case 'l':
+ l_count++;
+ fmt++;
+ goto loop;
+ case 's':
+ str = va_arg(args, char *);
+ string_print(&s, n, &count, str);
+ break;
+ case 'u':
+ unum = get_unum_va_args(args, l_count);
+ unsigned_num_print(&s, n, &count, unum, 10,
+ padc, padn);
+ break;
+ case 'x':
+ unum = get_unum_va_args(args, l_count);
+ unsigned_num_print(&s, n, &count, unum, 16,
+ padc, padn);
+ break;
+ case '0':
+ padc = '0';
+ padn = 0;
+ fmt++;
+
+ for (;;) {
+ char ch = *fmt;
+ if ((ch < '0') || (ch > '9')) {
+ goto loop;
+ }
+ padn = (padn * 10) + (ch - '0');
+ fmt++;
+ }
+ assert(0); /* Unreachable */
+ default:
+ /*
+ * Exit on any other format specifier and abort
+ * when in debug mode.
+ */
+ WARN("snprintf: specifier with ASCII code '%d' not supported.\n",
+ *fmt);
+ assert(0);
+ return -1;
+ }
+ fmt++;
+ continue;
+ }
+
+ if (count < n) {
+ *s = *fmt;
+ s++;
+ }
+
+ fmt++;
+ count++;
+ }
+
+ if (n > 0U)
+ *s = '\0';
+
+ return (int)count;
+}
+
+/*******************************************************************
+ * Reduced snprintf to be used for Trusted firmware.
+ * The following type specifiers are supported:
+ *
+ * %d or %i - signed decimal format
+ * %s - string format
+ * %u - unsigned decimal format
+ * %x - hexadecimal format
+ *
+ * The %l length specifier and the 0, NN and -NN padding specifiers
+ * are also supported, as for printf(). The function asserts in debug
+ * builds and returns -1 on any other format specifier.
+ *
+ * It returns the number of characters that would be written if the
+ * buffer was big enough. If it returns a value lower than n, the
+ * whole string has been written.
+ *******************************************************************/
+int snprintf(char *s, size_t n, const char *fmt, ...)
+{
+ va_list args;
+ int chars_printed;
+
+ va_start(args, fmt);
+ chars_printed = vsnprintf(s, n, fmt, args);
+ va_end(args);
+
+ return chars_printed;
+}
diff --git a/spm/scmi/libc/strchr.c b/spm/scmi/libc/strchr.c
new file mode 100644
index 0000000..d94bb9e
--- /dev/null
+++ b/spm/scmi/libc/strchr.c
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+char *
+strchr(const char *p, int ch)
+{
+ char c;
+
+ c = ch;
+ for (;; ++p) {
+ if (*p == c)
+ return ((char *)p);
+ if (*p == '\0')
+ return (NULL);
+ }
+ /* NOTREACHED */
+}
diff --git a/spm/scmi/libc/strcmp.c b/spm/scmi/libc/strcmp.c
new file mode 100644
index 0000000..b742f9b
--- /dev/null
+++ b/spm/scmi/libc/strcmp.c
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+/*
+ * Compare strings.
+ */
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == '\0')
+ return (0);
+ return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1));
+}
diff --git a/spm/scmi/libc/strlcpy.c b/spm/scmi/libc/strlcpy.c
new file mode 100644
index 0000000..c4f39bb
--- /dev/null
+++ b/spm/scmi/libc/strlcpy.c
@@ -0,0 +1,52 @@
+/* $OpenBSD: strlcpy.c,v 1.12 2015/01/15 03:54:12 millert Exp $ */
+
+/*
+ * SPDX-License-Identifier: ISC
+ *
+ * Copyright (c) 1998, 2015 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+/*
+ * Copy string src to buffer dst of size dsize. At most dsize-1
+ * chars will be copied. Always NUL terminates (unless dsize == 0).
+ * Returns strlen(src); if retval >= dsize, truncation occurred.
+ */
+size_t
+strlcpy(char * dst, const char * src, size_t dsize)
+{
+ const char *osrc = src;
+ size_t nleft = dsize;
+
+ /* Copy as many bytes as will fit. */
+ if (nleft != 0) {
+ while (--nleft != 0) {
+ if ((*dst++ = *src++) == '\0')
+ break;
+ }
+ }
+
+ /* Not enough room in dst, add NUL and traverse rest of src. */
+ if (nleft == 0) {
+ if (dsize != 0)
+ *dst = '\0'; /* NUL-terminate dst */
+ while (*src++)
+ ;
+ }
+
+ return(src - osrc - 1); /* count does not include NUL */
+}
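+
+/*
+ * Illustrative use (not part of this patch): truncation is detected from
+ * the return value, e.g.
+ *
+ *   if (strlcpy(buf, name, sizeof(buf)) >= sizeof(buf)) {
+ *           // name did not fit and was truncated
+ *   }
+ */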
diff --git a/spm/scmi/libc/strlen.c b/spm/scmi/libc/strlen.c
new file mode 100644
index 0000000..3c27630
--- /dev/null
+++ b/spm/scmi/libc/strlen.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+size_t strlen(const char *s)
+{
+ const char *cursor = s;
+
+ while (*cursor)
+ cursor++;
+
+ return cursor - s;
+}
diff --git a/spm/scmi/libc/strncmp.c b/spm/scmi/libc/strncmp.c
new file mode 100644
index 0000000..ce9e5ed
--- /dev/null
+++ b/spm/scmi/libc/strncmp.c
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+
+ if (n == 0)
+ return (0);
+ do {
+ if (*s1 != *s2++)
+ return (*(const unsigned char *)s1 -
+ *(const unsigned char *)(s2 - 1));
+ if (*s1++ == '\0')
+ break;
+ } while (--n != 0);
+ return (0);
+}
diff --git a/spm/scmi/libc/strncpy.c b/spm/scmi/libc/strncpy.c
new file mode 100644
index 0000000..00e4b7a
--- /dev/null
+++ b/spm/scmi/libc/strncpy.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2015-2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+/*
+ * Copy src to dst, truncating or null-padding to always copy n bytes.
+ * Return dst.
+ */
+char *
+strncpy(char * __restrict dst, const char * __restrict src, size_t n)
+{
+ if (n != 0) {
+ char *d = dst;
+ const char *s = src;
+
+ do {
+ if ((*d++ = *s++) == '\0') {
+ /* NUL pad the remaining n-1 bytes */
+ while (--n != 0)
+ *d++ = '\0';
+ break;
+ }
+ } while (--n != 0);
+ }
+ return (dst);
+}
diff --git a/spm/scmi/libc/strnlen.c b/spm/scmi/libc/strnlen.c
new file mode 100644
index 0000000..b944e95
--- /dev/null
+++ b/spm/scmi/libc/strnlen.c
@@ -0,0 +1,46 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2009 David Schultz <das@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Portions copyright (c) 2018, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+ size_t len;
+
+ for (len = 0; len < maxlen; len++, s++) {
+ if (!*s)
+ break;
+ }
+ return (len);
+}
diff --git a/spm/scmi/libc/strrchr.c b/spm/scmi/libc/strrchr.c
new file mode 100644
index 0000000..cd435ff
--- /dev/null
+++ b/spm/scmi/libc/strrchr.c
@@ -0,0 +1,49 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+char *
+strrchr(const char *p, int ch)
+{
+ char *save;
+ char c;
+
+ c = ch;
+ for (save = NULL;; ++p) {
+ if (*p == c)
+ save = (char *)p;
+ if (*p == '\0')
+ return (save);
+ }
+ /* NOTREACHED */
+}
diff --git a/spm/scmi/mp_printf.c b/spm/scmi/mp_printf.c
new file mode 100644
index 0000000..777c736
--- /dev/null
+++ b/spm/scmi/mp_printf.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <spinlock.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Lock to avoid concurrent accesses to the serial console */
+static spinlock_t printf_lock;
+
+void mp_printf(const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+
+ spin_lock(&printf_lock);
+ vprintf(fmt, args);
+ spin_unlock(&printf_lock);
+
+ va_end(args);
+}
diff --git a/spm/scmi/plat/arm/fvp/aarch64/plat_helpers.S b/spm/scmi/plat/arm/fvp/aarch64/plat_helpers.S
new file mode 100644
index 0000000..18cbbd4
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/aarch64/plat_helpers.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/pl011.h>
+#include "../fvp_def.h"
+
+ .globl platform_get_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on FVP.
+ *
+ * (ClusterId * FVP_MAX_CPUS_PER_CLUSTER * FVP_MAX_PE_PER_CPU) +
+ * (CPUId * FVP_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * FVP_MAX_CPUS_PER_CLUSTER + CPUId) * FVP_MAX_PE_PER_CPU)
+ * + ThreadId
+ * ---------------------------------------------------------------------
+ */
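+/*
+ * Worked example (illustrative): with FVP_MAX_CPUS_PER_CLUSTER = 4 and
+ * FVP_MAX_PE_PER_CPU = 1, a core with ClusterId = 1, CPUId = 2 and
+ * ThreadId = 0 maps to position (1 * 4 + 2) * 1 + 0 = 6.
+ */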
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+ * look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov x3, #FVP_MAX_CPUS_PER_CLUSTER
+ madd x1, x2, x3, x1
+ mov x3, #FVP_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_ARM_UART_BASE
+ mov_imm x1, PLAT_ARM_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+	 * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/spm/scmi/plat/arm/fvp/fdts/scmi.dts b/spm/scmi/plat/arm/fvp/fdts/scmi.dts
new file mode 100644
index 0000000..821a615
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/fdts/scmi.dts
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * that has additional optional properties defined.
+ *
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "arm,ffa-manifest-1.0";
+
+ /* Properties */
+ description = "Base-1";
+ ffa-version = <0x00010001>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x735cb579 0xb9448c1d 0xe1619385 0xd2d80a77>;
+ id = <3>;
+ auxiliary-id = <0xae>;
+ stream-endpoint-ids = <0 1 2 3>;
+ execution-ctx-count = <1>;
+ exception-level = <2>; /* S-EL1 */
+ execution-state = <0>; /* AARCH64 */
+ load-address = <0x7200000>;
+ entrypoint-offset = <0x00004000>;
+ xlat-granule = <0>; /* 4KiB */
+ boot-order = <2>;
+ messaging-method = <3>; /* Direct messaging only */
+ ns-interrupts-action = <2>; /* Managed exit is not supported */
+ notification-support; /* Support receipt of notifications. */
+
+ memory-regions {
+ compatible = "arm,ffa-manifest-memory-regions";
+
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0x7502000>;
+ attributes = <0x3>; /* read-write */
+ };
+
+ };
+
+ device-regions {
+ compatible = "arm,ffa-manifest-device-regions";
+
+ uart2 {
+ base-address = <0x00000000 0x1c0b0000>;
+ pages-count = <1>;
+ attributes = <0x3>; /* read-write */
+ };
+ };
+
+};
diff --git a/spm/scmi/plat/arm/fvp/fvp_def.h b/spm/scmi/plat/arm/fvp/fvp_def.h
new file mode 100644
index 0000000..bcd3a7c
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/fvp_def.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * FVP specific definitions. Used only by FVP specific code.
+ ******************************************************************************/
+
+#ifndef __FVP_DEF_H__
+#define __FVP_DEF_H__
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Cluster Topology definitions
+ ******************************************************************************/
+#ifndef FVP_CLUSTER_COUNT
+#error "FVP_CLUSTER_COUNT is not set in makefile"
+#endif
+
+#ifndef FVP_MAX_CPUS_PER_CLUSTER
+#error "FVP_MAX_CPUS_PER_CLUSTER is not set in makefile"
+#endif
+
+/*******************************************************************************
+ * FVP memory map related constants
+ ******************************************************************************/
+
+#define DEVICE0_BASE 0x1a000000
+#define DEVICE0_SIZE 0x12200000
+
+#define DEVICE1_BASE 0x2f000000
+#define DEVICE1_SIZE 0x400000
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+/* Base FVP compatible GIC memory map */
+#define GICD_BASE 0x2f000000
+#define GICR_BASE 0x2f100000
+#define GICC_BASE 0x2c000000
+
+/*******************************************************************************
+ * PL011 related constants
+ ******************************************************************************/
+#define PL011_UART0_BASE 0x1c090000
+#define PL011_UART1_BASE 0x1c0a0000
+#define PL011_UART2_BASE 0x1c0b0000
+#define PL011_UART3_BASE 0x1c0c0000
+
+#define PL011_UART0_CLK_IN_HZ 24000000
+#define PL011_UART1_CLK_IN_HZ 24000000
+#define PL011_UART2_CLK_IN_HZ 24000000
+#define PL011_UART3_CLK_IN_HZ 24000000
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ PL011_UART0_CLK_IN_HZ
+
+#endif /* __FVP_DEF_H__ */
diff --git a/spm/scmi/plat/arm/fvp/include/platform_def.h b/spm/scmi/plat/arm/fvp/include/platform_def.h
new file mode 100644
index 0000000..cc52663
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/include/platform_def.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <utils_def.h>
+
+#include "../fvp_def.h"
+
+/*******************************************************************************
+ * Platform definitions used by common code
+ ******************************************************************************/
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#ifdef __aarch64__
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+#else
+#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH arm
+#endif
+
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE 0x88000000
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE 0x1C0F0000
+
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE 0x2A490000
+#define IRQ_TWDOG_INTID 56
+
+/*******************************************************************************
+ * Base address and size of external NVM flash
+ ******************************************************************************/
+#define FLASH_BASE 0x08000000
+
+/*
+ * The FVP flash memory is modelled as an SCSP package of two dies with a
+ * total size of 512Mb. Only the main blocks, of 128KB each, are used for
+ * storing results. The FVP also performs data striping and splits each
+ * word in half between the two dies, so software sees a virtual block
+ * size of 256KB.
+ */
+#define NOR_FLASH_BLOCK_SIZE 0x40000 /* 256KB */
+#define NOR_FLASH_BLOCKS_COUNT 255
+#define FLASH_SIZE (NOR_FLASH_BLOCK_SIZE * NOR_FLASH_BLOCKS_COUNT)
+
+/**********************************
+ * Addresses to test invalid access
+ **********************************/
+/*
+ * The top 16MB (or 64MB if RME is enabled) of DRAM1 is configured as
+ * follows for FVP platform:
+ * - L1 GPT DRAM: Reserved for L1 GPT if RME is enabled
+ * - REALM DRAM: Reserved for Realm world if RME is enabled
+ * - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use
+ *
+ * RME enabled(64MB) RME not enabled(16MB)
+ * 0xFC00_0000 -------------------- ------------------- 0xFF00_0000
+ * | | | |
+ * 0xFD000000 | AP Secure (~28MB)| | AP TZC (~14MB) |
+ * -------------------- ------------------- 0xFFE0_0000
+ * | | | |
+ * | REALM (32MB) | | EL3 TZC (2MB) |
+ * -------------------- ------------------- 0xFFFF_FFFF
+ * | |
+ * 0xFFE0_0000 | EL3 Root (3MB) |
+ * --------------------
+ * | L1 GPT (1MB) |
+ * | |
+ * 0xFFFF_FFFF --------------------
+ *
+ *
+ */
+/* For both RME & non-RME case top 2MB will be EL3 memory */
+#define EL3_MEMORY_ACCESS_ADDR U(0xFFE00000)
+#define SECURE_MEMORY_ACCESS_ADDR U(0xFD000000)
+
+/*******************************************************************************
+ * Base address and size for the FIP that contains FWU images.
+ ******************************************************************************/
+#define PLAT_ARM_FWU_FIP_BASE (FLASH_BASE + 0x400000)
+#define PLAT_ARM_FWU_FIP_SIZE (0x100000)
+
+/*******************************************************************************
+ * This is the temporary DDR address for loading backup fip.bin
+ * image from NVM which is used for replacing original fip.bin
+ * This address is chosen such that the NS_BL2U can be expanded
+ * in future and also considering the large size of fip.bin.
+ ******************************************************************************/
+#define FIP_IMAGE_TMP_DDR_ADDRESS (DRAM_BASE + 0x100000)
+
+/*******************************************************************************
+ * This offset is used to corrupt data in fip.bin
+ * The offset is from the base where fip.bin is
+ * located in NVM. This particular value is chosen
+ * to make sure the corruption is done beyond fip header.
+ ******************************************************************************/
+#define FIP_CORRUPT_OFFSET (0x400)
+
+/*******************************************************************************
+ * This offset is used to corrupt data in fip.bin
+ * This is the base address for backup fip.bin image in NVM
+ * which is used for replacing original fip.bin
+ * This address is chosen such that it can stay with all
+ * the other images in the NVM.
+ ******************************************************************************/
+#define FIP_BKP_ADDRESS (FLASH_BASE + 0x1000000)
+
+/*******************************************************************************
+ * Base address and size for non-trusted SRAM.
+ ******************************************************************************/
+#define NSRAM_BASE (0x2e000000)
+#define NSRAM_SIZE (0x00010000)
+
+/*******************************************************************************
+ * NS_BL1U specific defines.
+ * NS_BL1U RW data is relocated from NS-ROM to NS-RAM at runtime so we
+ * need 2 sets of addresses.
+ ******************************************************************************/
+#define NS_BL1U_BASE (0x08000000 + 0x03EB8000)
+#define NS_BL1U_RO_LIMIT (NS_BL1U_BASE + 0xC000)
+
+/*******************************************************************************
+ * Put NS_BL1U RW at the top of the Non-Trusted SRAM. NS_BL1U_RW_BASE is
+ * calculated using the current NS_BL1U RW debug size plus a little space
+ * for growth.
+ ******************************************************************************/
+#define NS_BL1U_RW_SIZE (0x9000)
+#define NS_BL1U_RW_BASE (NSRAM_BASE)
+#define NS_BL1U_RW_LIMIT (NS_BL1U_RW_BASE + NS_BL1U_RW_SIZE)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define FVP_DRAM1_BASE 0x80000000
+#define FVP_DRAM2_BASE 0x880000000
+#define DRAM_BASE FVP_DRAM1_BASE
+#define DRAM_SIZE 0x80000000
+
+/*******************************************************************************
+ * Base address and limit for NS_BL2U image.
+ ******************************************************************************/
+#define NS_BL2U_BASE DRAM_BASE
+#define NS_BL2U_LIMIT (NS_BL2U_BASE + 0x4E000)
+
+/******************************************************************************
+ * Memory mapped Generic timer interfaces
+ ******************************************************************************/
+/* REFCLK CNTControl, Generic Timer. Secure Access only. */
+#define SYS_CNT_CONTROL_BASE 0x2a430000
+/* REFCLK CNTRead, Generic Timer. */
+#define SYS_CNT_READ_BASE 0x2a800000
+/* AP_REFCLK CNTBase1, Generic Timer. */
+#define SYS_CNT_BASE1 0x2a830000
+
+/* V2M motherboard system registers & offsets */
+#define VE_SYSREGS_BASE 0x1c010000
+#define V2M_SYS_LED 0x8
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if IMAGE_NS_BL1U || IMAGE_NS_BL2U
+#define PLATFORM_STACK_SIZE 0x1000
+#else
+#define PLATFORM_STACK_SIZE 0x1400
+#endif
+
+/* Size of coherent stacks for debug and release builds */
+#if DEBUG
+#define PCPU_DV_MEM_STACK_SIZE 0x600
+#else
+#define PCPU_DV_MEM_STACK_SIZE 0x500
+#endif
+
+#define PLATFORM_CORE_COUNT (FVP_CLUSTER_COUNT * \
+ FVP_MAX_CPUS_PER_CLUSTER * \
+ FVP_MAX_PE_PER_CPU)
+#define PLATFORM_NUM_AFFS (1 + FVP_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
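+
+/*
+ * For example (sketch), a build with FVP_CLUSTER_COUNT = 2,
+ * FVP_MAX_CPUS_PER_CLUSTER = 4 and FVP_MAX_PE_PER_CPU = 1 yields
+ * PLATFORM_CORE_COUNT = 8 and PLATFORM_NUM_AFFS = 11
+ * (1 system node + 2 clusters + 8 cores).
+ */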
+
+#define PLAT_MAX_PE_PER_CPU FVP_MAX_PE_PER_CPU
+
+/* TODO : Migrate complete TFTF from affinity level to power levels */
+#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+#if IMAGE_NS_BL1U
+#define MAX_IO_DEVICES 2
+#define MAX_IO_HANDLES 2
+#else
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
+#endif
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+#if USE_NVM
+/*
+ * The Flash memory is used to store the TFTF data on FVP.
+ * However, it might contain other data that must not be overwritten.
+ * For example, when using the Trusted Firmware-A, the FIP image
+ * (containing the bootloader images) is also stored in Flash.
+ * Hence, consider the first 40MB of Flash as reserved for firmware usage.
+ * The TFTF can use the rest of the Flash memory.
+ */
+#define TFTF_NVM_OFFSET 0x2800000 /* 40 MB */
+#define TFTF_NVM_SIZE (FLASH_SIZE - TFTF_NVM_OFFSET)
+#else
+/*
+ * If non-volatile memory is not supported (e.g. because no flash driver
+ * is available), DRAM can be used instead as a workaround. The TFTF
+ * binary itself is loaded at 0x88000000, so the first 128MB of DRAM can
+ * be used. Note that this is not suitable for all test scenarios; for
+ * this reason, some tests are disabled in this configuration.
+ */
+#define TFTF_NVM_OFFSET 0x0
+#define TFTF_NVM_SIZE (TFTF_BASE - DRAM_BASE - TFTF_NVM_OFFSET)
+#endif
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#ifdef __aarch64__
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << PA_SIZE)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << PA_SIZE)
+#else
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32)
+#endif
+
+#if IMAGE_TFTF
+/* For testing xlat tables lib v2 */
+#define MAX_XLAT_TABLES 20
+#define MAX_MMAP_REGIONS 50
+#else
+#if IMAGE_CACTUS
+#define MAX_XLAT_TABLES 9
+#elif IMAGE_SCMI
+#define MAX_XLAT_TABLES 9
+#else
+#define MAX_XLAT_TABLES 5
+#endif
+#define MAX_MMAP_REGIONS 16
+#endif
+
+/*******************************************************************************
+ * Used to align variables on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupt IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+/*
+ * On FVP, consider that the last SPI is the Trusted Random Number Generator
+ * interrupt.
+ */
+#define PLAT_MAX_SPI_OFFSET_ID 107
+
+/* AP_REFCLK, Generic Timer, CNTPSIRQ1. */
+#define IRQ_CNTPSIRQ1 58
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER 26
+/* Per-CPU Non-Secure Timer Interrupt ID */
+#define IRQ_PCPU_NS_TIMER 30
+
+
+/* Times(in ms) used by test code for completion of different events */
+#define PLAT_SUSPEND_ENTRY_TIME 15
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 30
+
+/*******************************************************************************
+ * Location of the memory buffer shared between Normal World (i.e. TFTF) and the
+ * Secure Partition (e.g. Cactus-MM) to pass data associated to secure service
+ * requests. This is only needed for SPM based on MM.
+ * Note: This address has to match the one used in TF (see ARM_SP_IMAGE_NS_BUF_*
+ * macros).
+ ******************************************************************************/
+#define ARM_SECURE_SERVICE_BUFFER_BASE 0xff600000ull
+#define ARM_SECURE_SERVICE_BUFFER_SIZE 0x10000ull
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/spm/scmi/plat/arm/fvp/include/sp_platform_def.h b/spm/scmi/plat/arm/fvp/include/sp_platform_def.h
new file mode 100644
index 0000000..e1b4694
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/include/sp_platform_def.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains common defines for a secure partition. The correct
+ * platform_def.h header file is selected according to the secure partition
+ * and platform being built using the make scripts.
+ */
+
+#ifndef SP_PLATFORM_DEF_H
+#define SP_PLATFORM_DEF_H
+
+#include <platform_def.h>
+
+#define PLAT_SP_RX_BASE ULL(0x7300000)
+#define PLAT_SP_CORE_COUNT U(8)
+
+#define PLAT_ARM_DEVICE0_BASE DEVICE0_BASE
+#define PLAT_ARM_DEVICE0_SIZE DEVICE0_SIZE
+
+#define CACTUS_PL011_UART_BASE PL011_UART2_BASE
+#define CACTUS_PL011_UART_CLK_IN_HZ PL011_UART2_CLK_IN_HZ
+
+#endif /* SP_PLATFORM_DEF_H */
diff --git a/spm/scmi/plat/arm/fvp/platform.mk b/spm/scmi/plat/arm/fvp/platform.mk
new file mode 100644
index 0000000..002a2a8
--- /dev/null
+++ b/spm/scmi/plat/arm/fvp/platform.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+FVP_SCMI_BASE = spm/scmi/plat/arm/fvp
+
+PLAT_INCLUDES += -I${FVP_SCMI_BASE}/include/
+
+# Add the FDT source
+SCMI_DTS = ${FVP_SCMI_BASE}/fdts/scmi.dts
+
+# List of FDTS to copy
+#FDTS_CP_LIST = ${FVP_SCMI_BASE}/fdts/scmi.dts
diff --git a/spm/scmi/sp_debug.c b/spm/scmi/sp_debug.c
new file mode 100644
index 0000000..9c91c56
--- /dev/null
+++ b/spm/scmi/sp_debug.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <sp_debug.h>
+#include <spm_helpers.h>
+
+static int (*putc_impl)(int);
+
+static int putc_hypcall(int c)
+{
+ hvc_args args = {
+ .fid = FFA_CONSOLE_LOG_SMC32,
+ .arg1 = 1,
+ .arg2 = c
+ };
+
+ (void)tftf_hvc(&args);
+ return c;
+}
+
+static int putc_ffacall(int c)
+{
+ struct ffa_value args = {
+ .fid = FFA_CONSOLE_LOG_SMC32,
+ .arg1 = 1,
+ .arg2 = c
+ };
+
+ ffa_service_call(&args);
+
+ return c;
+}
+
+static int putc_uart(int c)
+{
+ console_pl011_putc(c);
+
+ return c;
+}
+
+void set_putc_impl(enum stdout_route route)
+{
+ switch (route) {
+
+ case FFA_HVC_CALL_AS_STDOUT:
+ putc_impl = putc_hypcall;
+ return;
+ case FFA_SVC_SMC_CALL_AS_STDOUT:
+ putc_impl = putc_ffacall;
+ return;
+ case PL011_AS_STDOUT:
+ default:
+ break;
+ }
+
+ putc_impl = putc_uart;
+}
+
+int console_putc(int c)
+{
+ if (!putc_impl) {
+ return -1;
+ }
+
+ return putc_impl(c);
+}
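+
+/*
+ * Typical usage (sketch): select a route once during early boot, then print
+ * through the common entry point:
+ *
+ *   set_putc_impl(PL011_AS_STDOUT);
+ *   (void)console_putc('A');
+ */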
diff --git a/spm/scmi/sp_debug.h b/spm/scmi/sp_debug.h
new file mode 100644
index 0000000..49bf5e7
--- /dev/null
+++ b/spm/scmi/sp_debug.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+enum stdout_route {
+ PL011_AS_STDOUT = 0,
+ FFA_HVC_CALL_AS_STDOUT,
+ FFA_SVC_SMC_CALL_AS_STDOUT,
+};
+
+void set_putc_impl(enum stdout_route);
diff --git a/spm/scmi/sp_def.h b/spm/scmi/sp_def.h
new file mode 100644
index 0000000..2ff0974
--- /dev/null
+++ b/spm/scmi/sp_def.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_DEF_H
+#define SP_DEF_H
+
+#include <utils_def.h>
+#include <sp_platform_def.h>
+
+/*
+ * Layout of the Secure Partition image.
+ */
+
+/* Up to 2 MiB at an arbitrary address that doesn't overlap the devices. */
+#define SP_IMAGE_BASE ULL(0x1000)
+#define SP_IMAGE_SIZE ULL(0x200000)
+
+/* Memory reserved for stacks */
+#define SP_STACKS_SIZE ULL(0x8000)
+
+/*
+ * RX/TX buffers used by VMs in the SPM for memory sharing.
+ * Each VM is allocated two pages: one for the RX buffer and one for the
+ * TX buffer.
+ */
+#define SP_RX_BASE PLAT_SP_RX_BASE
+#define SP_TX_BASE (SP_RX_BASE + PAGE_SIZE)
+#define SP_RX_TX_SIZE (PAGE_SIZE * 2)
+
+/*
+ * RX/TX buffer helpers.
+ */
+#define get_sp_rx_start(sp_id) (SP_RX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE))
+#define get_sp_rx_end(sp_id) (SP_RX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE) \
+ + PAGE_SIZE)
+#define get_sp_tx_start(sp_id) (SP_TX_BASE + \
+ (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE))
+#define get_sp_tx_end(sp_id) (SP_TX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE) \
+ + PAGE_SIZE)
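+
+/*
+ * For example (sketch): an SP with id 0x8001 gets RX = SP_RX_BASE and
+ * TX = SP_TX_BASE, while id 0x8002 gets both buffers offset by one
+ * SP_RX_TX_SIZE (two pages) from those bases.
+ */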
+
+#endif /* SP_DEF_H */
diff --git a/spm/scmi/sp_helpers.h b/spm/scmi/sp_helpers.h
new file mode 100644
index 0000000..e0196f6
--- /dev/null
+++ b/spm/scmi/sp_helpers.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_HELPERS_H
+#define SP_HELPERS_H
+
+#include <stdint.h>
+#include <tftf_lib.h>
+#include <spm_common.h>
+#include <spinlock.h>
+
+/* Currently, Hafnium/SPM supports 1024 virtual interrupt IDs. */
+#define NUM_VINT_ID 1024
+
+typedef struct {
+ u_register_t fid;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+} svc_args;
+
+/*
+ * Trigger an SVC call.
+ *
+ * The arguments to pass through the SVC call must be stored in the svc_args
+ * structure. The return values of the SVC call will be stored in the same
+ * structure (overwriting the input arguments).
+ *
+ * Return the first return value. It is equivalent to args.fid but is also
+ * provided as the return value for convenience.
+ */
+u_register_t sp_svc(svc_args *args);
+
+/*
+ * Check that expr == expected.
+ * If not, loop forever.
+ */
+void expect(int expr, int expected);
+
+/*
+ * Test framework functions
+ */
+
+void sp_handler_spin_lock_init(void);
+
+/* Handler invoked by SP while processing interrupt. */
+extern void (*sp_interrupt_handler[NUM_VINT_ID])(void);
+
+/* Register the handler. */
+void sp_register_interrupt_handler(void (*handler)(void),
+ uint32_t interrupt_id);
+
+/* Un-register the handler. */
+void sp_unregister_interrupt_handler(uint32_t interrupt_id);
+
+void discover_managed_exit_interrupt_id(void);
+
+void register_maintenance_interrupt_handlers(void);
+
+#endif /* SP_HELPERS_H */
diff --git a/spm/scmi/spm_common.c b/spm/scmi/spm_common.c
new file mode 100644
index 0000000..35951e3
--- /dev/null
+++ b/spm/scmi/spm_common.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <debug.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <lib/extensions/sve.h>
+#include <spm_common.h>
+#include <xlat_tables_v2.h>
+
+bool memory_retrieve(struct mailbox_buffers *mb,
+ struct ffa_memory_region **retrieved, uint64_t handle,
+ ffa_id_t sender, ffa_id_t receiver,
+ ffa_memory_region_flags_t flags)
+{
+ struct ffa_value ret;
+ uint32_t fragment_size;
+ uint32_t total_size;
+ uint32_t descriptor_size;
+
+ if (retrieved == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ descriptor_size = ffa_memory_retrieve_request_init(
+ mb->send, handle, sender, receiver, 0, flags,
+ FFA_DATA_ACCESS_RW,
+ FFA_INSTRUCTION_ACCESS_NX,
+ FFA_MEMORY_NORMAL_MEM,
+ FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE);
+
+ ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
+
+ if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("Couldn't retrieve the memory page. Error: %x\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ /*
+	 * total_size and fragment_size are used to keep track of the state of
+	 * the transaction: when the sum of the fragment_size values of all
+	 * fragments equals total_size, the memory transaction is complete.
+	 * This is a simple test with only one fragment. As such, upon a
+	 * successful ffa_mem_retrieve_req, total_size must equal
+	 * fragment_size.
+ */
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
+
+ if (total_size != fragment_size) {
+ ERROR("Only expect one memory segment to be sent!\n");
+ return false;
+ }
+
+ if (fragment_size > PAGE_SIZE) {
+ ERROR("Fragment should be smaller than RX buffer!\n");
+ return false;
+ }
+
+ *retrieved = (struct ffa_memory_region *)mb->recv;
+
+ if ((*retrieved)->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing operations support max of %u "
+ "receivers!\n", MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+ VERBOSE("Memory Retrieved!\n");
+
+ return true;
+}
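+
+/*
+ * Typical usage (sketch, with hypothetical handle/ids obtained from a
+ * preceding FFA_MEM_SHARE exchange):
+ *
+ *   struct ffa_memory_region *region;
+ *
+ *   if (memory_retrieve(&mb, &region, handle, sender, receiver, 0)) {
+ *           ... use the constituents described by region, then relinquish ...
+ *   }
+ */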
+
+bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
+ ffa_id_t id)
+{
+ struct ffa_value ret;
+
+ ffa_mem_relinquish_init(m, handle, 0, id);
+ ret = ffa_mem_relinquish();
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR("%s failed to relinquish memory! error: %x\n",
+ __func__, ffa_error_code(ret));
+ return false;
+ }
+
+ VERBOSE("Memory Relinquished!\n");
+ return true;
+}
diff --git a/spm/scmi/spm_helpers.h b/spm/scmi/spm_helpers.h
new file mode 100644
index 0000000..1d3ddc2
--- /dev/null
+++ b/spm/scmi/spm_helpers.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_H
+#define SPMC_H
+
+#include <ffa_helpers.h>
+#include <spm_common.h>
+
+/* Should match with IDs defined in SPM/Hafnium */
+#define SPM_INTERRUPT_ENABLE (0xFF03)
+#define SPM_INTERRUPT_GET (0xFF04)
+#define SPM_INTERRUPT_DEACTIVATE (0xFF08)
+
+/*
+ * Hypervisor Calls Wrappers
+ */
+
+uint32_t spm_interrupt_get(void);
+int64_t spm_interrupt_enable(uint32_t int_id, bool enable, enum interrupt_pin pin);
+int64_t spm_interrupt_deactivate(uint32_t vint_id);
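+
+/*
+ * A plausible interrupt handling flow built on these wrappers (sketch):
+ *
+ *   uint32_t id = spm_interrupt_get();
+ *   ... service the interrupt ...
+ *   (void)spm_interrupt_deactivate(id);
+ */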
+
+#endif /* SPMC_H */
diff --git a/spm/scmi/xlat_tables_context.c b/spm/scmi/xlat_tables_context.c
new file mode 100644
index 0000000..0f3bab9
--- /dev/null
+++ b/spm/scmi/xlat_tables_context.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Allocate and initialise the default translation context for the software
+ * image currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+ mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+}
+
+void mmap_add_alloc_va(mmap_region_t *mm)
+{
+ while (mm->granularity != 0U) {
+ assert(mm->base_va == 0U);
+ mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
+ mm++;
+ }
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+ int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+ *base_va = mm.base_va;
+
+ return rc;
+}
+
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+ return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+ base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void __init init_xlat_tables(void)
+{
+ assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+ unsigned int current_el = xlat_arch_current_el();
+
+ if (current_el == 1U) {
+ tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+ } else if (current_el == 2U) {
+ tf_xlat_ctx.xlat_regime = EL2_REGIME;
+ } else {
+ assert(current_el == 3U);
+ tf_xlat_ctx.xlat_regime = EL3_REGIME;
+ }
+
+ init_xlat_tables_ctx(&tf_xlat_ctx);
+}
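+
+/*
+ * Typical bring-up sequence using this context (sketch; the enable call
+ * depends on the current EL, e.g. enable_mmu_el1() for an S-EL1 partition):
+ *
+ *   mmap_add_region(base_pa, base_va, size, MT_MEMORY | MT_RW);
+ *   init_xlat_tables();
+ *   enable_mmu_el1(0);
+ */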
+
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+ return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
+#endif
+
+#ifndef __aarch64__
+
+void enable_mmu_svc_mon(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_el1(flags);
+}
+
+void enable_mmu_el2(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_el2(flags);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL3_REGIME);
+ enable_mmu_direct_el3(flags);
+}
+
+#endif /* !__aarch64__ */
diff --git a/spm/scmi/xlat_tables_core.c b/spm/scmi/xlat_tables_core.c
new file mode 100644
index 0000000..c3dd445
--- /dev/null
+++ b/spm/scmi/xlat_tables_core.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+ if (is_dcache_enabled())
+ clean_dcache_range(addr, size);
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * The following functions assume that they will be called using subtables only.
+ * The base table can't be unmapped, so it is not needed to do any special
+ * handling for it.
+ */
+
+/*
+ * Returns the index of the array corresponding to the specified translation
+ * table.
+ */
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables[i] == table)
+ return i;
+
+ /*
+ * Maybe we were asked to get the index of the base level table, which
+ * should never happen.
+ */
+ assert(false);
+
+ return -1;
+}
+
+/* Returns a pointer to an empty translation table. */
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
+{
+ for (int i = 0; i < ctx->tables_num; i++)
+ if (ctx->tables_mapped_regions[i] == 0)
+ return ctx->tables[i];
+
+ return NULL;
+}
+
+/* Increments region count for a given table. */
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
+}
+
+/* Decrements region count for a given table. */
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
+{
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
+}
+
+/* Returns true if the specified table is empty, false otherwise. */
+static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
+{
+ return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0;
+}
+
+#else /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/* Returns a pointer to the first empty translation table. */
+static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+{
+ assert(ctx->next_table < ctx->tables_num);
+
+ return ctx->tables[ctx->next_table++];
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level)
+{
+ uint64_t desc;
+ uint32_t mem_type;
+
+ /* Make sure that the granularity is fine enough to map this address. */
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
+
+ desc = addr_pa;
+ /*
+ * There are different translation table descriptors for level 3 and the
+ * rest.
+ */
+ desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
+ /*
+ * Always set the access flag, as this library assumes access flag
+ * faults aren't managed.
+ */
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+ /*
+ * Deduce other fields of the descriptor based on the MT_NS and MT_RW
+ * memory region attributes.
+ */
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+
+ /*
+ * Do not allow unprivileged access when the mapping is for a privileged
+ * EL. For translation regimes that do not have mappings for access for
+ * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
+ */
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ if ((attr & MT_USER) != 0U) {
+ /* EL0 mapping requested, so we give User access */
+ desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+ } else {
+ /* EL1 mapping requested, no User access granted */
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
+ } else {
+ assert((ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+ desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
+ }
+
+ /*
+ * Deduce shareability domain and executability of the memory region
+ * from the memory type of the attributes (MT_TYPE).
+ *
+ * Data accesses to device memory and non-cacheable normal memory are
+ * coherent for all observers in the system, and correspondingly are
+ * always treated as being Outer Shareable. Therefore, for these 2 types
+ * of memory, it is not strictly needed to set the shareability field
+ * in the translation tables.
+ */
+ mem_type = MT_TYPE(attr);
+ if (mem_type == MT_DEVICE) {
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ /*
+ * Always map device memory as execute-never.
+ * This is to avoid the possibility of a speculative instruction
+ * fetch, which could be an issue if this memory region
+ * corresponds to a read-sensitive peripheral.
+ */
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ } else { /* Normal memory */
+ /*
+ * Always map read-write normal memory as execute-never.
+ * This library assumes that it is used by software that does
+ * not self-modify its code, therefore R/W memory is reserved
+ * for data storage, which must not be executable.
+ *
+ * Note that setting the XN bit here is for consistency only.
+ * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
+ * which makes any writable memory region to be treated as
+ * execute-never, regardless of the value of the XN bit in the
+ * translation table.
+ *
+ * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
+ * attribute to figure out the value of the XN bit. The actual
+ * XN bit(s) to set in the descriptor depends on the context's
+ * translation regime and the policy applied in
+ * xlat_arch_regime_get_xn_desc().
+ */
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+ }
+
+ if (mem_type == MT_MEMORY) {
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+#if ENABLE_BTI
+ /* Check if Branch Target Identification is implemented */
+ if (is_armv8_5_bti_present() &&
+ ((attr & (MT_TYPE_MASK | MT_RW |
+ MT_EXECUTE_NEVER)) == MT_CODE)) {
+ /* Set GP bit for block and page code entries */
+ desc |= GP;
+ }
+#endif
+ } else {
+ assert(mem_type == MT_NON_CACHEABLE);
+ desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
+ }
+ }
+
+ return desc;
+}
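+
+/*
+ * Worked example of the paths above (sketch): a level 3 mapping of
+ * MT_MEMORY | MT_RW in the EL1&0 regime, without MT_USER, produces
+ * PAGE_DESC | ACCESS_FLAG | AP_RW | AP_NO_ACCESS_UNPRIVILEGED, plus the
+ * regime's XN bits (the region is writable) and the inner-shareable
+ * write-back attribute index.
+ */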
+
+/*
+ * Enumeration of actions that can be made when mapping table entries depending
+ * on the previous value in that entry and information about the region being
+ * mapped.
+ */
+typedef enum {
+
+ /* Do nothing */
+ ACTION_NONE,
+
+ /* Write a block (or page, if in level 3) entry. */
+ ACTION_WRITE_BLOCK_ENTRY,
+
+ /*
+ * Create a new table and write a table entry pointing to it. Recurse
+ * into it for further processing.
+ */
+ ACTION_CREATE_NEW_TABLE,
+
+ /*
+ * There is a table descriptor in this entry, read it and recurse into
+ * that table for further processing.
+ */
+ ACTION_RECURSE_INTO_TABLE,
+
+} action_t;
+
+/*
+ * Function that returns the first VA of the table affected by the specified
+ * mmap region.
+ */
+static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm,
+ const uintptr_t table_base_va,
+ const unsigned int level)
+{
+ uintptr_t table_idx_va;
+
+ if (mm->base_va > table_base_va) {
+ /* Find the first index of the table affected by the region. */
+ table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
+ } else {
+ /* Start from the beginning of the table. */
+ table_idx_va = table_base_va;
+ }
+
+ return table_idx_va;
+}
+
+/*
+ * Function that returns table index for the given VA and level arguments.
+ */
+static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va,
+ const uintptr_t va,
+ const unsigned int level)
+{
+ return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level));
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+/*
+ * From the given arguments, it decides which action to take when unmapping the
+ * specified region.
+ */
+static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
+ const uintptr_t table_idx_va, const uintptr_t table_idx_end_va,
+ const unsigned int level, const uint64_t desc_type)
+{
+ action_t action;
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+
+ if ((mm->base_va <= table_idx_va) &&
+ (region_end_va >= table_idx_end_va)) {
+ /* Region covers all block */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors allowed,
+ * erase it.
+ */
+ assert(desc_type == PAGE_DESC);
+
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ } else {
+ /*
+ * Other levels can have table descriptors. If
+ * so, recurse into it and erase descriptors
+ * inside it as needed. If there is a block
+ * descriptor, just erase it. If an invalid
+ * descriptor is found, this table isn't
+ * actually mapped, which shouldn't happen.
+ */
+ if (desc_type == TABLE_DESC) {
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ assert(desc_type == BLOCK_DESC);
+ action = ACTION_WRITE_BLOCK_ENTRY;
+ }
+ }
+
+ } else if ((mm->base_va <= table_idx_end_va) ||
+ (region_end_va >= table_idx_va)) {
+ /*
+ * Region partially covers block.
+ *
+ * It can't happen in level 3.
+ *
+ * There must be a table descriptor here, if not there
+ * was a problem when mapping the region.
+ */
+ assert(level < 3U);
+ assert(desc_type == TABLE_DESC);
+
+ action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
+ }
+
+ return action;
+}
+
+/*
+ * Recursive function that writes to the translation tables and unmaps the
+ * specified region.
+ */
+static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ const uintptr_t table_base_va,
+ uint64_t *const table_base,
+ const unsigned int table_entries,
+ const unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ uintptr_t table_idx_va;
+ uintptr_t table_idx_end_va; /* End VA of this entry */
+
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+
+ unsigned int table_idx;
+
+ table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
+ table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
+
+ while (table_idx < table_entries) {
+
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ desc = table_base[table_idx];
+ uint64_t desc_type = desc & DESC_MASK;
+
+ action_t action = xlat_tables_unmap_region_action(mm,
+ table_idx_va, table_idx_end_va, level,
+ desc_type);
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+
+ /* Recurse to write into subtable */
+ xlat_tables_unmap_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /*
+ * If the subtable is now empty, remove its reference.
+ */
+ if (xlat_table_is_empty(ctx, subtable)) {
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
+ }
+
+ } else {
+ assert(action == ACTION_NONE);
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (region_end_va <= table_idx_va)
+ break;
+ }
+
+ if (level > ctx->base_level)
+ xlat_table_dec_regions_count(ctx, table_base);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * From the given arguments, it decides which action to take when mapping the
+ * specified region.
+ */
+static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
+{
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+ uintptr_t table_entry_end_va =
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
+
+ /*
+ * The descriptor types allowed depend on the current table level.
+ */
+
+ if ((mm->base_va <= table_entry_base_va) &&
+ (mm_end_va >= table_entry_end_va)) {
+
+ /*
+ * Table entry is covered by region
+ * --------------------------------
+ *
+ * This means that this table entry can describe the whole
+ * translation with this granularity in principle.
+ */
+
+ if (level == 3U) {
+ /*
+ * Last level, only page descriptors are allowed.
+ */
+ if (desc_type == PAGE_DESC) {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ return ACTION_NONE;
+ } else {
+ assert(desc_type == INVALID_DESC);
+ return ACTION_WRITE_BLOCK_ENTRY;
+ }
+
+ } else {
+
+ /*
+ * Other levels. Table descriptors are allowed. Block
+ * descriptors too, but they have some limitations.
+ */
+
+ if (desc_type == TABLE_DESC) {
+ /* There's already a table, recurse into it. */
+ return ACTION_RECURSE_INTO_TABLE;
+
+ } else if (desc_type == INVALID_DESC) {
+ /*
+ * There's nothing mapped here, create a new
+ * entry.
+ *
+ * Check if the destination granularity allows
+ * us to use a block descriptor or we need a
+ * finer table for it.
+ *
+ * Also, check if the current level allows block
+ * descriptors. If not, create a table instead.
+ */
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
+ (mm->granularity < XLAT_BLOCK_SIZE(level)))
+ return ACTION_CREATE_NEW_TABLE;
+ else
+ return ACTION_WRITE_BLOCK_ENTRY;
+
+ } else {
+ /*
+ * There's another region mapped here, don't
+ * overwrite.
+ */
+ assert(desc_type == BLOCK_DESC);
+
+ return ACTION_NONE;
+ }
+ }
+
+ } else if ((mm->base_va <= table_entry_end_va) ||
+ (mm_end_va >= table_entry_base_va)) {
+
+ /*
+ * Region partially covers table entry
+ * -----------------------------------
+ *
+ * This means that this table entry can't describe the whole
+ * translation, a finer table is needed.
+	 *
+ * There cannot be partial block overlaps in level 3. If that
+ * happens, some of the preliminary checks when adding the
+ * mmap region failed to detect that PA and VA must at least be
+ * aligned to PAGE_SIZE.
+ */
+ assert(level < 3U);
+
+ if (desc_type == INVALID_DESC) {
+ /*
+ * The block is not fully covered by the region. Create
+ * a new table, recurse into it and try to map the
+ * region with finer granularity.
+ */
+ return ACTION_CREATE_NEW_TABLE;
+
+ } else {
+ assert(desc_type == TABLE_DESC);
+ /*
+ * The block is not fully covered by the region, but
+ * there is already a table here. Recurse into it and
+ * try to map with finer granularity.
+ *
+ * PAGE_DESC for level 3 has the same value as
+ * TABLE_DESC, but this code can't run on a level 3
+ * table because there can't be overlaps in level 3.
+ */
+ return ACTION_RECURSE_INTO_TABLE;
+ }
+ } else {
+
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
+}
+
+/*
+ * Recursive function that writes to the translation tables and maps the
+ * specified region. On success, it returns the VA of the last byte that was
+ * successfully mapped. On error, it returns the VA of the next entry that
+ * should have been mapped.
+ */
+static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
+ uintptr_t table_base_va,
+ uint64_t *const table_base,
+ unsigned int table_entries,
+ unsigned int level)
+{
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
+
+ uintptr_t table_idx_va;
+ unsigned long long table_idx_pa;
+
+ uint64_t *subtable;
+ uint64_t desc;
+
+ unsigned int table_idx;
+
+ table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
+ table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (level > ctx->base_level)
+ xlat_table_inc_regions_count(ctx, table_base);
+#endif
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
+
+ action_t action = xlat_tables_map_region_action(mm,
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
+
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+
+ table_base[table_idx] =
+ xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+ level);
+
+ } else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
+
+ subtable = xlat_table_get_empty(ctx);
+ if (subtable == NULL) {
+ /* Not enough free tables to map this region */
+ return table_idx_va;
+ }
+
+ /* Point to new subtable from this one. */
+ table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
+
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
+ } else {
+
+ assert(action == ACTION_NONE);
+
+ }
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (mm_end_va <= table_idx_va)
+ break;
+ }
+
+ return table_idx_va - 1U;
+}
+
+/*
+ * Function that verifies that a region can be mapped.
+ * Returns:
+ * 0: Success, the mapping is allowed.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: The memory limits were surpassed.
+ * ENOMEM: There is not enough memory in the mmap array.
+ * EPERM: Region overlaps another one in an invalid way.
+ */
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ unsigned long long base_pa = mm->base_pa;
+ uintptr_t base_va = mm->base_va;
+ size_t size = mm->size;
+ size_t granularity = mm->granularity;
+
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
+
+ if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
+ !IS_PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
+ return -EINVAL;
+ }
+
+ /* Check for overflows */
+ if ((base_pa > end_pa) || (base_va > end_va))
+ return -ERANGE;
+
+ if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
+ return -ERANGE;
+
+ if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
+ return -ERANGE;
+
+ /* Check that there is space in the ctx->mmap array */
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
+ return -ENOMEM;
+
+ /* Check for PAs and VAs overlaps with all other regions */
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
+
+ uintptr_t mm_cursor_end_va = mm_cursor->base_va
+ + mm_cursor->size - 1U;
+
+ /*
+ * Check if one of the regions is completely inside the other
+ * one.
+ */
+ bool fully_overlapped_va =
+ ((base_va >= mm_cursor->base_va) &&
+ (end_va <= mm_cursor_end_va)) ||
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va));
+
+ /*
+ * Full VA overlaps are only allowed if both regions are
+ * identity mapped (zero offset) or have the same VA to PA
+ * offset. Also, make sure that it's not the exact same area.
+ * This can only be done with static regions.
+ */
+ if (fully_overlapped_va) {
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
+ return -EPERM;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+ if ((mm_cursor->base_va - mm_cursor->base_pa) !=
+ (base_va - base_pa))
+ return -EPERM;
+
+ if ((base_va == mm_cursor->base_va) &&
+ (size == mm_cursor->size))
+ return -EPERM;
+
+ } else {
+ /*
+ * If the regions do not have fully overlapping VAs,
+ * then they must have fully separated VAs and PAs.
+			 * Partial overlaps are not allowed.
+ */
+
+ unsigned long long mm_cursor_end_pa =
+ mm_cursor->base_pa + mm_cursor->size - 1U;
+
+ bool separated_pa = (end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa);
+ bool separated_va = (end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va);
+
+ if (!separated_va || !separated_pa)
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
+ const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Ignore empty regions */
+ if (mm->size == 0U)
+ return;
+
+ /* Static regions must be added before initializing the xlat tables. */
+ assert(!ctx->initialized);
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0) {
+ ERROR("mmap_add_region_check() failed. error %d\n", ret);
+ assert(false);
+ return;
+ }
+
+ /*
+ * Find correct place in mmap to insert new region.
+ *
+ * 1 - Lower region VA end first.
+ * 2 - Smaller region size first.
+ *
+ * VA 0 0xFF
+ *
+ * 1st |------|
+ * 2nd |------------|
+ * 3rd |------|
+ * 4th |---|
+ * 5th |---|
+ * 6th |----------|
+ * 7th |-------------------------------------|
+ *
+ * This is required for overlapping regions only. It simplifies adding
+ * regions with the loop in xlat_tables_init_internal because the outer
+ * ones won't overwrite block or page descriptors of regions added
+ * previously.
+ *
+ * Overlapping is only allowed for static regions.
+ */
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /*
+ * Find the last entry marker in the mmap
+ */
+ mm_last = ctx->mmap;
+ while ((mm_last->size != 0U) && (mm_last < mm_end)) {
+ ++mm_last;
+ }
+
+ /*
+ * Check if we have enough space in the memory mapping table.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ /* Make room for new region by moving other regions up by one place */
+ mm_destination = mm_cursor + 1;
+ (void)memmove(mm_destination, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check we haven't lost the empty sentinel from the end of the array.
+ * This shouldn't happen as we have checked in mmap_add_region_check
+ * that there is free space.
+ */
+ assert(mm_end->size == 0U);
+
+ *mm_cursor = *mm;
+
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+}
+
+/*
+ * Determine the table level closest to the initial lookup level that
+ * can describe this translation. Then, align base VA to the next block
+ * at the determined level.
+ */
+static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ /*
+ * By or'ing the size and base PA the alignment will be the one
+ * corresponding to the smallest boundary of the two of them.
+ *
+ * There are three different cases. For example (for 4 KiB page size):
+ *
+ * +--------------+------------------++--------------+
+ * | PA alignment | Size multiple of || VA alignment |
+ * +--------------+------------------++--------------+
+ * | 2 MiB | 2 MiB || 2 MiB | (1)
+ * | 2 MiB | 4 KiB || 4 KiB | (2)
+ * | 4 KiB | 2 MiB || 4 KiB | (3)
+ * +--------------+------------------++--------------+
+ *
+ * - In (1), it is possible to take advantage of the alignment of the PA
+ * and the size of the region to use a level 2 translation table
+ * instead of a level 3 one.
+ *
+ * - In (2), the size is smaller than a block entry of level 2, so it is
+ * needed to use a level 3 table to describe the region or the library
+ * will map more memory than the desired one.
+ *
+ * - In (3), even though the region has the size of one level 2 block
+ * entry, it isn't possible to describe the translation with a level 2
+ * block entry because of the alignment of the base PA.
+ *
+ * Only bits 47:21 of a level 2 block descriptor are used by the MMU,
+ * bits 20:0 of the resulting address are 0 in this case. Because of
+ * this, the PA generated as result of this translation is aligned to
+ * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
+ * though, which means that the resulting translation is incorrect.
+ * The only way to prevent this is by using a finer granularity.
+ */
+ unsigned long long align_check;
+
+ align_check = mm->base_pa | (unsigned long long)mm->size;
+
+ /*
+ * Assume it is always aligned to level 3. There's no need to check that
+ * level because its block size is PAGE_SIZE. The checks to verify that
+ * the addresses and size are aligned to PAGE_SIZE are inside
+ * mmap_add_region.
+ */
+ for (unsigned int level = ctx->base_level; level <= 2U; ++level) {
+
+ if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
+ continue;
+
+ mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
+ return;
+ }
+}
+
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ assert(mm->size > 0U);
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ assert(mm->base_va > ctx->max_va);
+
+ mmap_add_region_ctx(ctx, mm);
+}
+
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->granularity != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
+ }
+}
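+
+/*
+ * The array passed here must end with a zeroed sentinel entry, e.g.
+ * (sketch):
+ *
+ *   const mmap_region_t plat_regions[] = {
+ *           MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW),
+ *           {0}
+ *   };
+ *
+ *   mmap_add(plat_regions);
+ */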
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mmap_region_t *mm_cursor = ctx->mmap;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
+ int ret;
+
+ /* Nothing to do */
+ if (mm->size == 0U)
+ return 0;
+
+ /* Now this region is a dynamic one */
+ mm->attr |= MT_DYNAMIC;
+
+ ret = mmap_add_region_check(ctx, mm);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Find the adequate entry in the mmap array in the same way done for
+ * static regions in mmap_add_region_ctx().
+ */
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
+ ++mm_cursor;
+ }
+
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
+ ++mm_cursor;
+ }
+
+ /* Make room for new region by moving other regions up by one place */
+ (void)memmove(mm_cursor + 1U, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+	 * Check we haven't lost the empty sentinel from the end of the array.
+	 * This shouldn't happen as we have checked in mmap_add_region_check()
+ * that there is free space.
+ */
+ assert(mm_last->size == 0U);
+
+ *mm_cursor = *mm;
+
+ /*
+ * Update the translation tables if the xlat tables are initialized. If
+ * not, this region will be mapped when they are initialized.
+ */
+ if (ctx->initialized) {
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ /* Failed to map, remove mmap entry, unmap and return error. */
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+
+ /*
+ * Check if the mapping function actually managed to map
+ * anything. If not, just return now.
+ */
+ if (mm->base_va >= end_va)
+ return -ENOMEM;
+
+ /*
+ * Something went wrong after mapping some table
+ * entries, undo every change done up to this point.
+ */
+ mmap_region_t unmap_mm = {
+ .base_pa = 0U,
+ .base_va = mm->base_va,
+ .size = end_va - mm->base_va,
+ .attr = 0U
+ };
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ return -ENOMEM;
+ }
+
+ /*
+		 * Make sure that all entries are written to memory. There is
+		 * no need to invalidate TLB entries when mapping dynamic
+		 * regions because the new table/block/page descriptors only
+		 * replace old invalid descriptors, which are never cached in
+		 * the TLB.
+ */
+ dsbishst();
+ }
+
+ if (end_pa > ctx->max_pa)
+ ctx->max_pa = end_pa;
+ if (end_va > ctx->max_va)
+ ctx->max_va = end_va;
+
+ return 0;
+}
+
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+ mm->base_va = ctx->max_va + 1UL;
+
+ if (mm->size == 0U)
+ return 0;
+
+ mmap_alloc_va_align_ctx(ctx, mm);
+
+ /* Detect overflows. More checks are done in mmap_add_region_check(). */
+ if (mm->base_va < ctx->max_va) {
+ return -ENOMEM;
+ }
+
+ return mmap_add_dynamic_region_ctx(ctx, mm);
+}
+
+/*
+ * Removes the region with the given base virtual address and size from the
+ * given context.
+ *
+ * Returns:
+ *    0: Success.
+ *    -EINVAL: Invalid values were used as arguments (region not found).
+ *    -EPERM: Tried to remove a static region.
+ */
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size)
+{
+ mmap_region_t *mm = ctx->mmap;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
+ int update_max_va_needed = 0;
+ int update_max_pa_needed = 0;
+
+ /* Check sanity of mmap array. */
+ assert(mm[ctx->mmap_num].size == 0U);
+
+ while (mm->size != 0U) {
+ if ((mm->base_va == base_va) && (mm->size == size))
+ break;
+ ++mm;
+ }
+
+ /* Check that the region was found */
+ if (mm->size == 0U)
+ return -EINVAL;
+
+ /* If the region is static it can't be removed */
+ if ((mm->attr & MT_DYNAMIC) == 0U)
+ return -EPERM;
+
+ /* Check if this region is using the top VAs or PAs. */
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
+ update_max_va_needed = 1;
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
+ update_max_pa_needed = 1;
+
+ /* Update the translation tables if needed */
+ if (ctx->initialized) {
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
+ ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ xlat_arch_tlbi_va_sync();
+ }
+
+ /* Remove this region by moving the rest down by one place. */
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
+
+ /* Check if we need to update the max VAs and PAs */
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
+ mm = ctx->mmap;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
+ ++mm;
+ }
+ }
+
+ return 0;
+}
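+
+/*
+ * Illustrative sketch with hypothetical values: a temporary mapping is
+ * added with a library-chosen VA, used, and removed again. The remove
+ * call must be given exactly the base_va/size pair of the region,
+ * otherwise -EINVAL is returned.
+ */
+static int __unused with_temporary_mapping_example(xlat_ctx_t *ctx)
+{
+	mmap_region_t tmp = {
+		.base_pa = 0x80000000ULL,		/* hypothetical PA */
+		.size = PAGE_SIZE,
+		.attr = MT_MEMORY | MT_RW,
+		.granularity = XLAT_BLOCK_SIZE(1U),	/* assumed default */
+	};
+	int ret = mmap_add_dynamic_region_alloc_va_ctx(ctx, &tmp);
+
+	if (ret != 0)
+		return ret;
+
+	/* ... access the memory through tmp.base_va here ... */
+
+	return mmap_remove_dynamic_region_ctx(ctx, tmp.base_va, tmp.size);
+}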
+
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+ uintptr_t va_max, struct mmap_region *mmap,
+ unsigned int mmap_num, uint64_t **tables,
+ unsigned int tables_num, uint64_t *base_table,
+ int xlat_regime, int *mapped_regions)
+{
+ ctx->xlat_regime = xlat_regime;
+
+ ctx->pa_max_address = pa_max;
+ ctx->va_max_address = va_max;
+
+ ctx->mmap = mmap;
+ ctx->mmap_num = mmap_num;
+ memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
+
+ ctx->tables = (void *) tables;
+ ctx->tables_num = tables_num;
+
+ uintptr_t va_space_size = va_max + 1;
+ ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
+ ctx->base_table = base_table;
+ ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
+
+ ctx->tables_mapped_regions = mapped_regions;
+
+ ctx->max_pa = 0;
+ ctx->max_va = 0;
+ ctx->initialized = 0;
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
+{
+ assert(ctx != NULL);
+ assert(!ctx->initialized);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
+ assert(!is_mmu_enabled_ctx(ctx));
+
+ mmap_region_t *mm = ctx->mmap;
+
+ assert(ctx->va_max_address >=
+ (xlat_get_min_virt_addr_space_size() - 1U));
+ assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
+ assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));
+
+ xlat_mmap_print(mm);
+
+ /* All tables must be zeroed before mapping any region. */
+
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
+ ctx->base_table[i] = INVALID_DESC;
+
+ for (int j = 0; j < ctx->tables_num; j++) {
+#if PLAT_XLAT_TABLES_DYNAMIC
+ ctx->tables_mapped_regions[j] = 0;
+#endif
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
+ ctx->tables[j][i] = INVALID_DESC;
+ }
+
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
+ if (end_va != (mm->base_va + mm->size - 1U)) {
+ ERROR("Not enough memory to map region:\n"
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
+ panic();
+ }
+
+ mm++;
+ }
+
+ assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
+ assert(ctx->max_va <= ctx->va_max_address);
+ assert(ctx->max_pa <= ctx->pa_max_address);
+
+ ctx->initialized = true;
+
+ xlat_tables_print(ctx);
+}
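+
+/*
+ * Illustrative bring-up order (sketch only, with a hypothetical regions
+ * array): regions are registered first, then the tables are built.
+ * Enabling the MMU itself is done by the architectural helpers and is
+ * out of scope here.
+ */
+static void __unused xlat_setup_example(xlat_ctx_t *ctx,
+					const mmap_region_t *regions)
+{
+	mmap_add_ctx(ctx, regions);	/* record the regions */
+	init_xlat_tables_ctx(ctx);	/* write the descriptors */
+}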
diff --git a/spm/scmi/xlat_tables_private.h b/spm/scmi/xlat_tables_private.h
new file mode 100644
index 0000000..8f51686
--- /dev/null
+++ b/spm/scmi/xlat_tables_private.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
+
+#include <platform_def.h>
+#include <stdbool.h>
+#include <xlat_tables_defs.h>
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Private shifts and masks to access fields of an mmap attribute
+ */
+/* Dynamic or static */
+#define MT_DYN_SHIFT U(31)
+
+/*
+ * Memory mapping private attributes
+ *
+ * Private attributes not exposed in the public header.
+ */
+
+/*
+ * Regions mapped before the MMU is enabled can't be unmapped dynamically
+ * (they are static), while regions mapped with the MMU enabled can be
+ * unmapped. This behaviour can't be overridden.
+ *
+ * Static regions can overlap each other; dynamic regions can't.
+ */
+#define MT_STATIC (U(0) << MT_DYN_SHIFT)
+#define MT_DYNAMIC (U(1) << MT_DYN_SHIFT)
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
+/*
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
+ * pertaining to a higher exception level, e.g. invalidating EL3 entries from
+ * S-EL1.
+ */
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
+
+/*
+ * This function has to be called at the end of any code that uses the function
+ * xlat_arch_tlbi_va().
+ */
+void xlat_arch_tlbi_va_sync(void);
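+
+/*
+ * Illustrative usage of the two functions above (sketch only): after
+ * modifying a live descriptor, invalidate its VA and complete the
+ * invalidation before relying on the new mapping. 'entry', 'va',
+ * 'regime' and 'new_desc' are hypothetical:
+ *
+ *	*entry = INVALID_DESC;
+ *	xlat_arch_tlbi_va(va, regime);
+ *	xlat_arch_tlbi_va_sync();
+ *	*entry = new_desc;
+ */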
+
+/* Print VA, PA, size and attributes of all regions in the mmap array. */
+void xlat_mmap_print(const mmap_region_t *mmap);
+
+/*
+ * Print the current state of the translation tables by reading them from
+ * memory.
+ */
+void xlat_tables_print(xlat_ctx_t *ctx);
+
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, unsigned int level);
+
+/*
+ * Architecture-specific initialization code.
+ */
+
+/* Returns the current Exception Level. The returned EL must be 1 or higher. */
+unsigned int xlat_arch_current_el(void);
+
+/*
+ * Return the maximum physical address supported by the hardware.
+ * This value depends on the execution state (AArch32/AArch64).
+ */
+unsigned long long xlat_arch_get_max_supported_pa(void);
+
+/*
+ * Returns true if the MMU of the translation regime managed by the given
+ * xlat_ctx_t is enabled, false otherwise.
+ */
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
+/*
+ * Returns the minimum virtual address space size supported by the
+ * architecture.
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void);
+
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/spm/scmi/xlat_tables_utils.c b/spm/scmi/xlat_tables_utils.c
new file mode 100644
index 0000000..168d492
--- /dev/null
+++ b/spm/scmi/xlat_tables_utils.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <utils_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
+{
+ /* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+ /* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_mmap_print(const mmap_region_t *mmap)
+{
+ printf("mmap:\n");
+ const mmap_region_t *mm = mmap;
+
+ while (mm->size != 0U) {
+ printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr,
+ mm->granularity);
+ ++mm;
+	}
+ printf("\n");
+}
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
+ int xlat_regime = ctx->xlat_regime;
+
+ if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ printf("MEM");
+ } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+ printf("NC");
+ } else {
+ assert(mem_type_index == ATTR_DEVICE_INDEX);
+ printf("DEV");
+ }
+
+ if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+ /* For EL3 and EL2 only check the AP[2] and XN bits. */
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
+ } else {
+ assert(xlat_regime == EL1_EL0_REGIME);
+ /*
+ * For EL0 and EL1:
+ * - In AArch64 PXN and UXN can be set independently but in
+ * AArch32 there is no UXN (XN affects both privilege levels).
+ * For consistency, we set them simultaneously in both cases.
+ * - RO and RW permissions must be the same in EL1 and EL0. If
+ * EL0 can access that memory region, so can EL1, with the
+ * same permissions.
+ */
+#if ENABLE_ASSERTIONS
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
+ uint64_t xn_perm = desc & xn_mask;
+
+ assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
+#endif
+ printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ /* Only check one of PXN and UXN, the other one is the same. */
+ printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
+ /*
+ * Privileged regions can only be accessed from EL1, user
+ * regions can be accessed from EL1 and EL0.
+ */
+ printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
+ ? "-USER" : "-PRIV");
+ }
+
+ printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
+
+#ifdef __aarch64__
+ /* Check Guarded Page bit */
+ if ((desc & GP) != 0ULL) {
+ printf("-GP");
+ }
+#endif
+}
+
+static const char * const level_spacers[] = {
+ "[LV0] ",
+ " [LV1] ",
+ " [LV2] ",
+ " [LV3] "
+};
+
+static const char *invalid_descriptors_omitted =
+ "%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
+{
+ assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+ uint64_t desc;
+ uintptr_t table_idx_va = table_base_va;
+ unsigned int table_idx = 0U;
+ size_t level_size = XLAT_BLOCK_SIZE(level);
+
+ /*
+ * Keep track of how many invalid descriptors are counted in a row.
+ * Whenever multiple invalid descriptors are found, only the first one
+ * is printed, and a line is added to inform about how many descriptors
+ * have been omitted.
+ */
+ int invalid_row_count = 0;
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+ }
+ invalid_row_count++;
+
+ } else {
+
+ if (invalid_row_count > 1) {
+				printf(invalid_descriptors_omitted,
+				       level_spacers[level],
+				       invalid_row_count - 1);
+ }
+ invalid_row_count = 0;
+
+ /*
+			 * Check if this is a table or a block. Table
+			 * descriptors are only allowed at levels other than 3,
+			 * but DESC_PAGE has the same encoding as DESC_TABLE,
+			 * so the level must be checked as well.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
+ /*
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
+ */
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1U);
+ } else {
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+ }
+ }
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+		printf(invalid_descriptors_omitted,
+		       level_spacers[level], invalid_row_count - 1);
+ }
+}
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+ const char *xlat_regime_str;
+ int used_page_tables;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ xlat_regime_str = "2";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
+ VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
+ VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
+ VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
+
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
+ ctx->base_table_entries);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+ used_page_tables = 0;
+ for (int i = 0; i < ctx->tables_num; ++i) {
+ if (ctx->tables_mapped_regions[i] != 0)
+ ++used_page_tables;
+ }
+#else
+ used_page_tables = ctx->next_table;
+#endif
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
+ used_page_tables, ctx->tables_num,
+ ctx->tables_num - used_page_tables);
+
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ unsigned int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ unsigned int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ unsigned int entries;
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ uint64_t idx, desc, desc_type;
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ if (idx >= entries) {
+ WARN("Missing xlat table entry at address 0x%lx\n",
+ virtual_addr);
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * Only page descriptors allowed at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(false);
+
+ return NULL;
+}
+
+
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+ uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, unsigned int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ unsigned int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0U;
+
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
+
+ if (ns_bit == 1U)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0U);
+ }
+
+ return 0;
+}
+
+
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr)
+{
+ return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+ NULL, NULL, NULL);
+}
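+
+
+/*
+ * Illustrative sketch: query the attributes of a mapped page and test a
+ * single property. base_va is a hypothetical, mapped virtual address.
+ */
+static bool __unused va_is_writable_example(const xlat_ctx_t *ctx,
+					    uintptr_t base_va)
+{
+	uint32_t attr;
+
+	if (xlat_get_mem_attributes_ctx(ctx, base_va, &attr) != 0)
+		return false;	/* not mapped */
+
+	return (attr & MT_RW) != 0U;
+}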
+
+
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0U) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0U) {
+ WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
+		WARN("%s: Mapping memory as read-write and executable is not allowed.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ size_t pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address 0x%lx is not mapped.\n", base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+				WARN("Setting device memory as executable at address 0x%lx.\n",
+ base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ for (unsigned int i = 0U; i < pages_count; ++i) {
+
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
+
+ (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
+ base_va += PAGE_SIZE;
+ }
+
+	/* Ensure that the last descriptor written is seen by the system. */
+ dsbish();
+
+ return 0;
+}
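+
+
+/*
+ * Illustrative sketch with hypothetical arguments: after copying code
+ * into a read-write buffer, remap it read-only so it may be executed
+ * (the function above rejects read-write executable mappings). code_va
+ * must be page aligned and code_size a multiple of PAGE_SIZE.
+ */
+static int __unused seal_code_region_example(const xlat_ctx_t *ctx,
+					     uintptr_t code_va,
+					     size_t code_size)
+{
+	/*
+	 * Omitting MT_RW requests read-only; omitting MT_EXECUTE_NEVER
+	 * leaves the region executable.
+	 */
+	return xlat_change_mem_attributes_ctx(ctx, code_va, code_size,
+					      MT_MEMORY);
+}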