author  Jens Wiklander <jens.wiklander@linaro.org>  2020-06-02 17:58:31 +0200
committer  Jérôme Forissier <jerome@forissier.org>  2020-07-20 13:21:16 +0200
commit  1b302ac0981603328c5d1b26300eb9328c0eec49 (patch)
tree  cadde3d47eb9d641653ad72fee3bfed6378d263a
parent  b0490ed15c01d646505c893b270bd519a5efbc72 (diff)
core: enable FF-A with SPM Core at S-EL1
Adds support for using FF-A as transport instead of the proprietary SMCs
defined in optee_smc.h. The configuration supports the case where SPM Core
is implemented at S-EL1, that is, inside OP-TEE. This configuration is
also known as "S-EL1 SPMC" in the FF-A 1.0 specification [1].

Compile with CFG_CORE_SEL1_SPMC=y

Note that this is an experimental feature; ABIs etc. may see incompatible
changes.

Link: [1] https://static.docs.arm.com/den0077/a/DEN0077A_PSA_Firmware_Framework_Arm_v8-A_1.0_EAC.pdf

Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
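
Illustration (not part of this patch): with this transport a normal-world
caller reaches OP-TEE through FFA_MSG_SEND_DIRECT_REQ_32 rather than the
OPTEE_SMC_* fast/yielding SMCs, with the OP-TEE service ID in w3. A
minimal sketch, assuming a hypothetical ffa_call() conduit helper and the
IDs from ffa.h and optee_ffa.h introduced elsewhere in this series:

    struct ffa_args {
            unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
    };

    static void probe_optee_api_version(uint16_t self_id, uint16_t optee_id)
    {
            struct ffa_args args = {
                    .a0 = FFA_MSG_SEND_DIRECT_REQ_32,
                    /* w1: sender in bits [31:16], receiver in bits [15:0] */
                    .a1 = ((uint32_t)self_id << 16) | optee_id,
                    .a3 = OPTEE_FFA_GET_API_VERSION,
            };

            ffa_call(&args); /* hypothetical conduit (SMC) helper */
            /* handle_blocking_call() puts major/minor version in w3/w4 */
    }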
-rw-r--r--  .shippable.yml  1
-rw-r--r--  core/arch/arm/arm.mk  8
-rw-r--r--  core/arch/arm/include/kernel/thread.h  4
-rw-r--r--  core/arch/arm/kernel/asm-defines.c  7
-rw-r--r--  core/arch/arm/kernel/entry_a32.S  18
-rw-r--r--  core/arch/arm/kernel/entry_a64.S  17
-rw-r--r--  core/arch/arm/kernel/sub.mk  7
-rw-r--r--  core/arch/arm/kernel/thread_private.h  21
-rw-r--r--  core/arch/arm/kernel/thread_spmc.c  1169
-rw-r--r--  core/arch/arm/kernel/thread_spmc_a32.S  141
-rw-r--r--  core/arch/arm/kernel/thread_spmc_a64.S  152
-rw-r--r--  core/arch/arm/plat-vexpress/sub.mk  8
-rw-r--r--  core/arch/arm/tee/entry_std.c  59
-rw-r--r--  core/arch/arm/tee/sub.mk  2
-rw-r--r--  core/kernel/sub.mk  2
15 files changed, 1612 insertions(+), 4 deletions(-)
diff --git a/.shippable.yml b/.shippable.yml
index b120f376..c3231e9c 100644
--- a/.shippable.yml
+++ b/.shippable.yml
@@ -54,6 +54,7 @@ build:
- _make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_FTRACE_SUPPORT=y CFG_ULIBS_MCOUNT=y CFG_ULIBS_SHARED=y
- _make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_TA_GPROF_SUPPORT=y CFG_FTRACE_SUPPORT=y CFG_SYSCALL_FTRACE=y CFG_ULIBS_MCOUNT=y
- _make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_VIRTUALIZATION=y
+ - _make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_CORE_SEL1_SPMC=y
- _make PLATFORM=stm-b2260
- _make PLATFORM=stm-cannes
- _make PLATFORM=stm32mp1
diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
index 834b90de..5c9f16ef 100644
--- a/core/arch/arm/arm.mk
+++ b/core/arch/arm/arm.mk
@@ -85,6 +85,14 @@ ifeq ($(CFG_CORE_LARGE_PHYS_ADDR),y)
$(call force,CFG_WITH_LPAE,y)
endif
+# SPMC configuration "S-EL1 SPMC" where SPM Core is implemented at S-EL1,
+# that is, inside OP-TEE.
+# Note that this is an experimental feature; ABIs etc. may have
+# incompatible changes.
+ifeq ($(CFG_CORE_SEL1_SPMC),y)
+$(call force,CFG_CORE_FFA,y)
+endif
+
# Unmaps all kernel mode code except the code needed to take exceptions
# from user space and restore kernel mode mapping again. This gives more
# strict control over what is accessible while in user mode.
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
index c0443e5c..c7ef6a86 100644
--- a/core/arch/arm/include/kernel/thread.h
+++ b/core/arch/arm/include/kernel/thread.h
@@ -235,12 +235,16 @@ struct thread_specific_data {
void *rpc_fs_payload;
struct mobj *rpc_fs_payload_mobj;
size_t rpc_fs_payload_size;
+#ifdef CFG_CORE_FFA
+ uint32_t rpc_target_info;
+#endif
uint32_t abort_type;
uint32_t abort_descr;
vaddr_t abort_va;
unsigned int abort_core;
struct thread_abort_regs abort_regs;
+
};
#endif /*__ASSEMBLER__*/
diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c
index abe46a71..4f3183fd 100644
--- a/core/arch/arm/kernel/asm-defines.c
+++ b/core/arch/arm/kernel/asm-defines.c
@@ -92,6 +92,13 @@ DEFINES
DEFINE(THREAD_CORE_LOCAL_X2, offsetof(struct thread_core_local, x[2]));
#endif /*ARM64*/
+ /* struct thread_ctx */
+ DEFINE(THREAD_CTX_SIZE, sizeof(struct thread_ctx));
+#ifdef CFG_CORE_FFA
+ DEFINE(THREAD_CTX_TSD_RPC_TARGET_INFO,
+ offsetof(struct thread_ctx, tsd.rpc_target_info));
+#endif
+
/* struct thread_core_local */
DEFINE(THREAD_CORE_LOCAL_TMP_STACK_VA_END,
offsetof(struct thread_core_local, tmp_stack_va_end));
diff --git a/core/arch/arm/kernel/entry_a32.S b/core/arch/arm/kernel/entry_a32.S
index 26306b21..6c127297 100644
--- a/core/arch/arm/kernel/entry_a32.S
+++ b/core/arch/arm/kernel/entry_a32.S
@@ -560,6 +560,18 @@ shadow_stack_access_ok:
*/
bl thread_clr_boot_thread
+#ifdef CFG_CORE_FFA
+ ldr r0, =cpu_on_handler
+ /*
+ * Compensate for the load offset since cpu_on_handler() is
+ * called with MMU off.
+ */
+ ldr r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
+ sub r0, r0, r1
+ bl ffa_secondary_cpu_boot_req
+ b thread_ffa_msg_wait
+#else /* CFG_CORE_FFA */
+
#if defined(CFG_WITH_ARM_TRUSTED_FW)
ldr r0, =boot_mmu_config
ldr r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
@@ -577,6 +589,7 @@ shadow_stack_access_ok:
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
+#endif /* CFG_CORE_FFA */
UNWIND( .fnend)
END_FUNC reset_primary
@@ -811,8 +824,11 @@ UNWIND( .cantunwind)
mov r0, r4
mov r1, r5
bl boot_cpu_on_handler
-
+#ifdef CFG_CORE_FFA
+ b thread_ffa_msg_wait
+#else
bx r6
+#endif
UNWIND( .fnend)
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler
diff --git a/core/arch/arm/kernel/entry_a64.S b/core/arch/arm/kernel/entry_a64.S
index 2a50ec7b..f572982e 100644
--- a/core/arch/arm/kernel/entry_a64.S
+++ b/core/arch/arm/kernel/entry_a64.S
@@ -232,6 +232,17 @@ clear_nex_bss:
bl thread_clr_boot_thread
#endif
+#ifdef CFG_CORE_FFA
+ adr x0, cpu_on_handler
+ /*
+ * Compensate for the load offset since cpu_on_handler() is
+ * called with MMU off.
+ */
+ ldr x1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
+ sub x0, x0, x1
+ bl ffa_secondary_cpu_boot_req
+ b thread_ffa_msg_wait
+#else
/*
* Pass the vector address returned from main_init
* Compensate for the load offset since cpu_on_handler() is
@@ -243,6 +254,7 @@ clear_nex_bss:
mov x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
+#endif
END_FUNC _start
DECLARE_KEEP_INIT _start
@@ -404,8 +416,13 @@ FUNC cpu_on_handler , :
mov x0, x19
mov x1, x20
+#ifdef CFG_CORE_FFA
+ bl boot_cpu_on_handler
+ b thread_ffa_msg_wait
+#else
mov x30, x21
b boot_cpu_on_handler
+#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler
diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk
index cdaba8c9..965b13e4 100644
--- a/core/arch/arm/kernel/sub.mk
+++ b/core/arch/arm/kernel/sub.mk
@@ -26,9 +26,16 @@ srcs-$(CFG_PL310) += tee_l2cc_mutex.c
srcs-$(CFG_ARM32_core) += thread_a32.S
srcs-$(CFG_ARM64_core) += thread_a64.S
srcs-y += thread.c
+ifeq ($(CFG_CORE_FFA),y)
+srcs-y += thread_spmc.c
+cppflags-thread_spmc.c-y += -DTEE_IMPL_GIT_SHA1=$(TEE_IMPL_GIT_SHA1)
+srcs-$(CFG_ARM32_core) += thread_spmc_a32.S
+srcs-$(CFG_ARM64_core) += thread_spmc_a64.S
+else
srcs-y += thread_optee_smc.c
srcs-$(CFG_ARM32_core) += thread_optee_smc_a32.S
srcs-$(CFG_ARM64_core) += thread_optee_smc_a64.S
+endif
srcs-y += abort.c
srcs-$(CFG_WITH_VFP) += vfp.c
ifeq ($(CFG_WITH_VFP),y)
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
index ed35e6e7..2933f679 100644
--- a/core/arch/arm/kernel/thread_private.h
+++ b/core/arch/arm/kernel/thread_private.h
@@ -194,7 +194,28 @@ void thread_unlock_global(void);
* world.
*/
#define THREAD_RPC_NUM_ARGS 4
+#ifdef CFG_CORE_FFA
+struct thread_rpc_arg {
+ union {
+ struct {
+ uint32_t w1;
+ uint32_t w4;
+ uint32_t w5;
+ uint32_t w6;
+ } call;
+ struct {
+ uint32_t w4;
+ uint32_t w5;
+ uint32_t w6;
+ } ret;
+ uint32_t pad[THREAD_RPC_NUM_ARGS];
+ };
+};
+
+void thread_rpc(struct thread_rpc_arg *rpc_arg);
+#else
void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
+#endif
/*
* Called from assembly only, vector_fast_smc_entry(). Handles a fast SMC
diff --git a/core/arch/arm/kernel/thread_spmc.c b/core/arch/arm/kernel/thread_spmc.c
new file mode 100644
index 00000000..b74cd167
--- /dev/null
+++ b/core/arch/arm/kernel/thread_spmc.c
@@ -0,0 +1,1169 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2020, Linaro Limited.
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ */
+
+#include <assert.h>
+#include <ffa.h>
+#include <io.h>
+#include <kernel/interrupt.h>
+#include <kernel/panic.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_misc.h>
+#include <kernel/thread.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <optee_ffa.h>
+#include <optee_msg.h>
+#include <optee_rpc_cmd.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <tee/entry_std.h>
+#include <util.h>
+
+#include "thread_private.h"
+
+/* Table 39: Constituent memory region descriptor */
+struct constituent_address_range {
+ uint64_t address;
+ uint32_t page_count;
+ uint32_t reserved;
+};
+
+/* Table 38: Composite memory region descriptor */
+struct mem_region_descr {
+ uint32_t total_page_count;
+ uint32_t address_range_count;
+ uint64_t reserved;
+ struct constituent_address_range address_range_array[];
+};
+
+/* Table 40: Memory access permissions descriptor */
+struct mem_access_perm_descr {
+ uint16_t endpoint_id;
+ uint8_t access_perm;
+ uint8_t flags;
+};
+
+/* Table 41: Endpoint memory access descriptor */
+struct mem_access_descr {
+ struct mem_access_perm_descr mem_access_perm_descr;
+ uint32_t mem_region_offs;
+ uint64_t reserved;
+};
+
+/* Table 44: Lend, donate or share memory transaction descriptor */
+struct mem_transaction_descr {
+ uint16_t sender_id;
+ uint8_t mem_reg_attr;
+ uint8_t reserved0;
+ uint32_t flags;
+ uint64_t global_handle;
+ uint64_t tag;
+ uint32_t reserved1;
+ uint32_t mem_access_descr_count;
+ struct mem_access_descr mem_access_descr_array[];
+};
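+
+/*
+ * Illustrative only, not part of this patch: a minimal well-formed
+ * transaction as parsed by mem_share_init() below, with one endpoint
+ * access descriptor pointing at one composite region holding a single
+ * constituent range. The helper name is made up; the attribute values
+ * match the ones checked in mem_share_init().
+ */
+#if 0
+static void build_share_example(void *tx, uint16_t receiver_id,
+ uint64_t pa, uint32_t page_count)
+{
+ struct mem_transaction_descr *td = tx;
+ struct mem_region_descr *rd = NULL;
+
+ *td = (struct mem_transaction_descr){
+ .mem_reg_attr = 0x2f, /* Normal, WB cacheable, inner shareable */
+ .mem_access_descr_count = 1,
+ };
+ td->mem_access_descr_array[0] = (struct mem_access_descr){
+ .mem_access_perm_descr = {
+ .endpoint_id = receiver_id,
+ .access_perm = 0x6, /* Read-write, not executable */
+ },
+ .mem_region_offs = sizeof(*td) +
+ sizeof(struct mem_access_descr),
+ };
+ rd = (void *)((uint8_t *)tx +
+ td->mem_access_descr_array[0].mem_region_offs);
+ *rd = (struct mem_region_descr){
+ .total_page_count = page_count,
+ .address_range_count = 1,
+ };
+ rd->address_range_array[0] = (struct constituent_address_range){
+ .address = pa,
+ .page_count = page_count,
+ };
+}
+#endif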
+
+struct ffa_partition_info {
+ uint16_t id;
+ uint16_t execution_context;
+ uint32_t partition_properties;
+};
+
+struct mem_share_state {
+ struct mobj_ffa *mf;
+ unsigned int page_count;
+ unsigned int region_count;
+ unsigned int current_page_idx;
+};
+
+struct mem_frag_state {
+ struct mem_share_state share;
+ tee_mm_entry_t *mm;
+ unsigned int frag_offset;
+ SLIST_ENTRY(mem_frag_state) link;
+};
+
+static uint16_t my_sp_id = 0x8001;
+
+/*
+ * If @rxtx_size is 0, the RX/TX buffers are not mapped or initialized.
+ *
+ * @rxtx_spinlock protects the variables below from concurrent access;
+ * this includes the use of the content of @rx_buf and @frag_state_head.
+ *
+ * @tx_buf_is_mine is true when we may write to @tx_buf and false when it
+ * is owned by normal world.
+ *
+ * Note that we can't prevent normal world from updating the content of
+ * these buffers so we must always be careful when reading, even while we
+ * hold the lock.
+ */
+static void *rx_buf;
+static void *tx_buf;
+static unsigned int rxtx_size;
+static unsigned int rxtx_spinlock;
+static bool tx_buf_is_mine;
+
+static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
+ SLIST_HEAD_INITIALIZER(&frag_state_head);
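+
+/*
+ * Buffer ownership over a typical exchange, illustrative summary:
+ * normal world maps the buffers with FFA_RXTX_MAP (@tx_buf_is_mine set),
+ * the SPMC writes a response into @tx_buf and clears @tx_buf_is_mine,
+ * and normal world hands the buffer back with FFA_RX_RELEASE once it
+ * has read it, setting @tx_buf_is_mine again.
+ */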
+
+static uint32_t swap_src_dst(uint32_t src_dst)
+{
+ return (src_dst >> 16) | (src_dst << 16);
+}
+
+static void set_args(struct thread_smc_args *args, uint32_t fid,
+ uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
+ uint32_t w5)
+{
+ *args = (struct thread_smc_args){ .a0 = fid,
+ .a1 = src_dst,
+ .a2 = w2,
+ .a3 = w3,
+ .a4 = w4,
+ .a5 = w5, };
+}
+
+static void handle_version(struct thread_smc_args *args)
+{
+ /*
+ * We currently only support one version, 1.0, so let's keep it
+ * simple.
+ */
+ set_args(args, MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+}
+
+static void handle_features(struct thread_smc_args *args)
+{
+ uint32_t ret_fid = 0;
+ uint32_t ret_w2 = FFA_PARAM_MBZ;
+
+ switch (args->a1) {
+#ifdef ARM64
+ case FFA_RXTX_MAP_64:
+#endif
+ case FFA_RXTX_MAP_32:
+ ret_fid = FFA_SUCCESS_32;
+ ret_w2 = 0; /* 4 kB minimum buffer size and alignment boundary */
+ break;
+#ifdef ARM64
+ case FFA_MEM_SHARE_64:
+#endif
+ case FFA_MEM_SHARE_32:
+ ret_fid = FFA_SUCCESS_32;
+ /*
+ * Partition manager supports transmission of a memory
+ * transaction descriptor in a buffer dynamically allocated
+ * by the endpoint.
+ */
+ ret_w2 = BIT(0);
+ break;
+
+ case FFA_ERROR:
+ case FFA_VERSION:
+ case FFA_SUCCESS_32:
+#ifdef ARM64
+ case FFA_SUCCESS_64:
+#endif
+ case FFA_MEM_FRAG_TX:
+ case FFA_MEM_RECLAIM:
+ case FFA_MSG_SEND_DIRECT_REQ_32:
+ case FFA_INTERRUPT:
+ case FFA_PARTITION_INFO_GET:
+ case FFA_RX_RELEASE:
+ ret_fid = FFA_SUCCESS_32;
+ break;
+ default:
+ ret_fid = FFA_ERROR;
+ ret_w2 = FFA_NOT_SUPPORTED;
+ break;
+ }
+
+ set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
+{
+ tee_mm_entry_t *mm = NULL;
+
+ if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
+ return FFA_INVALID_PARAMETERS;
+
+ mm = tee_mm_alloc(&tee_mm_shm, sz);
+ if (!mm)
+ return FFA_NO_MEMORY;
+
+ if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
+ sz / SMALL_PAGE_SIZE,
+ MEM_AREA_NSEC_SHM)) {
+ tee_mm_free(mm);
+ return FFA_INVALID_PARAMETERS;
+ }
+
+ *va_ret = (void *)tee_mm_get_smem(mm);
+ return 0;
+}
+
+static void unmap_buf(void *va, size_t sz)
+{
+ tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
+
+ assert(mm);
+ core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
+ tee_mm_free(mm);
+}
+
+static void handle_rxtx_map(struct thread_smc_args *args)
+{
+ int rc = 0;
+ uint32_t ret_fid = FFA_ERROR;
+ unsigned int sz = 0;
+ paddr_t rx_pa = 0;
+ paddr_t tx_pa = 0;
+ void *rx = NULL;
+ void *tx = NULL;
+
+ cpu_spin_lock(&rxtx_spinlock);
+
+ if (args->a3 & GENMASK_64(63, 6)) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ sz = args->a3 * SMALL_PAGE_SIZE;
+ if (!sz) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+ /* TX/RX are swapped compared to the caller */
+ tx_pa = args->a2;
+ rx_pa = args->a1;
+
+ if (rxtx_size) {
+ rc = FFA_DENIED;
+ goto out;
+ }
+
+ rc = map_buf(tx_pa, sz, &tx);
+ if (rc)
+ goto out;
+ rc = map_buf(rx_pa, sz, &rx);
+ if (rc) {
+ unmap_buf(tx, sz);
+ goto out;
+ }
+
+ tx_buf = tx;
+ rx_buf = rx;
+ rxtx_size = sz;
+ tx_buf_is_mine = true;
+ ret_fid = FFA_SUCCESS_32;
+ DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
+ DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
+out:
+ cpu_spin_unlock(&rxtx_spinlock);
+ set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
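+
+/*
+ * Illustrative only: seen from the normal-world caller the call above is
+ * FFA_RXTX_MAP(w1 = its TX buffer, w2 = its RX buffer, w3 = page count);
+ * the handler reads the addresses swapped because the caller's TX buffer
+ * is the SPMC's RX buffer and vice versa.
+ */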
+
+static void handle_rxtx_unmap(struct thread_smc_args *args)
+{
+ uint32_t ret_fid = FFA_ERROR;
+ int rc = FFA_INVALID_PARAMETERS;
+
+ cpu_spin_lock(&rxtx_spinlock);
+
+ if (!rxtx_size)
+ goto out;
+ unmap_buf(rx_buf, rxtx_size);
+ unmap_buf(tx_buf, rxtx_size);
+ rxtx_size = 0;
+ rx_buf = NULL;
+ tx_buf = NULL;
+ ret_fid = FFA_SUCCESS_32;
+ rc = 0;
+out:
+ cpu_spin_unlock(&rxtx_spinlock);
+ set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+static void handle_rx_release(struct thread_smc_args *args)
+{
+ uint32_t ret_fid = 0;
+ int rc = 0;
+
+ cpu_spin_lock(&rxtx_spinlock);
+ /* The sender's RX is our TX */
+ if (!rxtx_size || tx_buf_is_mine) {
+ ret_fid = FFA_ERROR;
+ rc = FFA_DENIED;
+ } else {
+ ret_fid = FFA_SUCCESS_32;
+ rc = 0;
+ tx_buf_is_mine = true;
+ }
+ cpu_spin_unlock(&rxtx_spinlock);
+
+ set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
+{
+ return !w0 && !w1 && !w2 && !w3;
+}
+
+static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
+{
+ return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
+ w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
+ w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
+ w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
+}
+
+static void handle_partition_info_get(struct thread_smc_args *args)
+{
+ uint32_t ret_fid = 0;
+ int rc = 0;
+
+ if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
+ !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
+ ret_fid = FFA_ERROR;
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ cpu_spin_lock(&rxtx_spinlock);
+ if (rxtx_size && tx_buf_is_mine) {
+ struct ffa_partition_info *fpi = tx_buf;
+
+ fpi->id = my_sp_id;
+ fpi->execution_context = CFG_TEE_CORE_NB_CORE;
+ fpi->partition_properties = BIT(0) | BIT(1);
+
+ ret_fid = FFA_SUCCESS_32;
+ rc = 1;
+ tx_buf_is_mine = false;
+ } else {
+ ret_fid = FFA_ERROR;
+ if (rxtx_size)
+ rc = FFA_BUSY;
+ else
+ rc = FFA_DENIED; /* TX buffer not set up yet */
+ }
+ cpu_spin_unlock(&rxtx_spinlock);
+
+out:
+ set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+static void handle_yielding_call(struct thread_smc_args *args)
+{
+ uint32_t ret_val = 0;
+
+ thread_check_canaries();
+
+ if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
+ /* Note connection to struct thread_rpc_arg::ret */
+ thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
+ 0);
+ ret_val = FFA_INVALID_PARAMETERS;
+ } else {
+ thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
+ ret_val = FFA_BUSY;
+ }
+ set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
+ swap_src_dst(args->a1), 0, ret_val, 0, 0);
+}
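+
+/*
+ * Note on the flow above: thread_resume_from_rpc() and
+ * thread_alloc_and_run() only return on failure, so the direct response
+ * composed after them is the error path. On success the thread returns
+ * to normal world through thread_std_smc_entry() or thread_rpc()
+ * instead.
+ */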
+
+static void handle_blocking_call(struct thread_smc_args *args)
+{
+ switch (args->a3) {
+ case OPTEE_FFA_GET_API_VERSION:
+ set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
+ swap_src_dst(args->a1), 0, OPTEE_FFA_VERSION_MAJOR,
+ OPTEE_FFA_VERSION_MINOR, 0);
+ break;
+ case OPTEE_FFA_GET_OS_VERSION:
+ set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
+ swap_src_dst(args->a1), 0, CFG_OPTEE_REVISION_MAJOR,
+ CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
+ break;
+ case OPTEE_FFA_EXCHANGE_CAPABILITIES:
+ set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
+ swap_src_dst(args->a1), 0, 0, 0, 0);
+ break;
+ default:
+ EMSG("Unhandled blocking service ID %#"PRIx32,
+ (uint32_t)args->a3);
+ panic();
+ }
+}
+
+static int get_acc_perms(struct mem_access_descr *mem_acc,
+ unsigned int num_mem_accs, uint8_t *acc_perms,
+ unsigned int *region_offs)
+{
+ unsigned int n = 0;
+
+ for (n = 0; n < num_mem_accs; n++) {
+ struct mem_access_perm_descr *descr =
+ &mem_acc[n].mem_access_perm_descr;
+
+ if (READ_ONCE(descr->endpoint_id) == my_sp_id) {
+ *acc_perms = READ_ONCE(descr->access_perm);
+ *region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
+ return 0;
+ }
+ }
+
+ return FFA_INVALID_PARAMETERS;
+}
+
+static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
+ unsigned int *region_count, size_t *addr_range_offs)
+{
+ struct mem_region_descr *region_descr = NULL;
+ struct mem_transaction_descr *descr = NULL;
+ const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
+ /* Normal memory, Write-Back cacheable, Inner shareable */
+ const uint8_t exp_mem_reg_attr = 0x2f;
+ unsigned int num_mem_accs = 0;
+ uint8_t mem_acc_perm = 0;
+ unsigned int region_descr_offs = 0;
+ size_t n = 0;
+
+ if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
+ blen < sizeof(struct mem_transaction_descr))
+ return FFA_INVALID_PARAMETERS;
+
+ descr = buf;
+
+ /* Check that the endpoint memory access descriptor array fits */
+ num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
+ if (MUL_OVERFLOW(sizeof(struct mem_access_descr), num_mem_accs, &n) ||
+ ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
+ return FFA_INVALID_PARAMETERS;
+
+ if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
+ return FFA_INVALID_PARAMETERS;
+
+ /* Check that the access permissions match what's expected */
+ if (get_acc_perms(descr->mem_access_descr_array,
+ num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
+ mem_acc_perm != exp_mem_acc_perm)
+ return FFA_INVALID_PARAMETERS;
+
+ /* Check that the Composite memory region descriptor fits */
+ if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
+ n > blen)
+ return FFA_INVALID_PARAMETERS;
+
+ if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
+ struct mem_region_descr))
+ return FFA_INVALID_PARAMETERS;
+
+ region_descr = (struct mem_region_descr *)((vaddr_t)descr +
+ region_descr_offs);
+ *page_count = READ_ONCE(region_descr->total_page_count);
+ *region_count = READ_ONCE(region_descr->address_range_count);
+ *addr_range_offs = n;
+ return 0;
+}
+
+static int add_mem_share_helper(struct mem_share_state *s, void *buf,
+ size_t flen)
+{
+ unsigned int region_count = flen /
+ sizeof(struct constituent_address_range);
+ struct constituent_address_range *arange = NULL;
+ unsigned int n = 0;
+
+ if (region_count > s->region_count)
+ region_count = s->region_count;
+
+ if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
+ return FFA_INVALID_PARAMETERS;
+ arange = buf;
+
+ for (n = 0; n < region_count; n++) {
+ unsigned int page_count = READ_ONCE(arange[n].page_count);
+ uint64_t addr = READ_ONCE(arange[n].address);
+
+ if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
+ addr, page_count))
+ return FFA_INVALID_PARAMETERS;
+ }
+
+ s->region_count -= region_count;
+ if (s->region_count)
+ return region_count * sizeof(*arange);
+
+ if (s->current_page_idx != s->page_count)
+ return FFA_INVALID_PARAMETERS;
+
+ return 0;
+}
+
+static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
+{
+ int rc = 0;
+
+ rc = add_mem_share_helper(&s->share, buf, flen);
+ if (rc >= 0) {
+ if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
+ if (s->share.region_count)
+ return s->frag_offset;
+ /* We're done, return the number of consumed bytes */
+ rc = s->frag_offset;
+ } else {
+ rc = FFA_INVALID_PARAMETERS;
+ }
+ }
+
+ SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
+ if (rc < 0)
+ mobj_ffa_sel1_spmc_delete(s->share.mf);
+ else
+ mobj_ffa_push_to_inactive(s->share.mf);
+ free(s);
+
+ return rc;
+}
+
+static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
+ size_t flen, uint64_t *global_handle)
+{
+ int rc = 0;
+ struct mem_share_state share = { };
+ size_t addr_range_offs = 0;
+ size_t n = 0;
+
+ if (flen > blen)
+ return FFA_INVALID_PARAMETERS;
+
+ rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
+ &addr_range_offs);
+ if (rc)
+ return rc;
+
+ if (MUL_OVERFLOW(share.region_count,
+ sizeof(struct constituent_address_range), &n) ||
+ ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
+ return FFA_INVALID_PARAMETERS;
+
+ share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
+ if (!share.mf)
+ return FFA_NO_MEMORY;
+
+ if (flen != blen) {
+ struct mem_frag_state *s = calloc(sizeof(*s), 1);
+
+ if (!s) {
+ rc = FFA_NO_MEMORY;
+ goto err;
+ }
+ s->share = share;
+ s->mm = mm;
+ s->frag_offset = addr_range_offs;
+
+ SLIST_INSERT_HEAD(&frag_state_head, s, link);
+ rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
+ flen - addr_range_offs);
+
+ if (rc >= 0)
+ *global_handle = mobj_ffa_get_cookie(share.mf);
+
+ return rc;
+ }
+
+ rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
+ flen - addr_range_offs);
+ if (rc) {
+ /*
+ * For a non-fragmented share everything must be consumed in one
+ * go and 0 returned, so a positive number of consumed bytes
+ * (regions remaining) is as much an error here as a negative
+ * error code.
+ */
+ rc = FFA_INVALID_PARAMETERS;
+ goto err;
+ }
+
+ *global_handle = mobj_ffa_push_to_inactive(share.mf);
+
+ return 0;
+err:
+ mobj_ffa_sel1_spmc_delete(share.mf);
+ return rc;
+}
+
+static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
+ unsigned int page_count,
+ uint64_t *global_handle)
+{
+ int rc = 0;
+ size_t len = 0;
+ tee_mm_entry_t *mm = NULL;
+ vaddr_t offs = pbuf & SMALL_PAGE_MASK;
+
+ if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
+ return FFA_INVALID_PARAMETERS;
+ if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
+ return FFA_INVALID_PARAMETERS;
+
+ /*
+ * Check that the length reported in blen is covered by len even
+ * if the offset is taken into account.
+ */
+ if (len < blen || len - offs < blen)
+ return FFA_INVALID_PARAMETERS;
+
+ mm = tee_mm_alloc(&tee_mm_shm, len);
+ if (!mm)
+ return FFA_NO_MEMORY;
+
+ if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
+ page_count, MEM_AREA_NSEC_SHM)) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ cpu_spin_lock(&rxtx_spinlock);
+ rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
+ global_handle);
+ cpu_spin_unlock(&rxtx_spinlock);
+ if (rc > 0)
+ return rc;
+
+ core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
+out:
+ tee_mm_free(mm);
+ return rc;
+}
+
+static int handle_mem_share_rxbuf(size_t blen, size_t flen,
+ uint64_t *global_handle)
+{
+ int rc = FFA_DENIED;
+
+ cpu_spin_lock(&rxtx_spinlock);
+
+ if (rx_buf && flen <= rxtx_size)
+ rc = add_mem_share(NULL, rx_buf, blen, flen, global_handle);
+
+ cpu_spin_unlock(&rxtx_spinlock);
+
+ return rc;
+}
+
+static void handle_mem_share(struct thread_smc_args *args)
+{
+ uint32_t ret_w1 = 0;
+ uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
+ uint32_t ret_w3 = 0;
+ uint32_t ret_fid = FFA_ERROR;
+ uint64_t global_handle = 0;
+ int rc = 0;
+
+ /* Check that the MBZs are indeed 0 */
+ if (args->a5 || args->a6 || args->a7)
+ goto out;
+
+ if (!args->a3) {
+ /*
+ * The memory transaction descriptor is passed via our rx
+ * buffer.
+ */
+ if (args->a4)
+ goto out;
+ rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle);
+ } else {
+ rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
+ args->a4, &global_handle);
+ }
+ if (rc < 0) {
+ ret_w2 = rc;
+ goto out;
+ }
+ if (rc > 0) {
+ ret_fid = FFA_MEM_FRAG_RX;
+ ret_w3 = rc;
+ reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
+ goto out;
+ }
+ ret_fid = FFA_SUCCESS_32;
+ reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
+out:
+ set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
+}
+
+static struct mem_frag_state *get_frag_state(uint64_t global_handle)
+{
+ struct mem_frag_state *s = NULL;
+
+ SLIST_FOREACH(s, &frag_state_head, link)
+ if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
+ return s;
+
+ return NULL;
+}
+
+static void handle_mem_frag_tx(struct thread_smc_args *args)
+{
+ int rc = 0;
+ uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
+ READ_ONCE(args->a1));
+ size_t flen = READ_ONCE(args->a3);
+ struct mem_frag_state *s = NULL;
+ tee_mm_entry_t *mm = NULL;
+ unsigned int page_count = 0;
+ void *buf = NULL;
+ uint32_t ret_w1 = 0;
+ uint32_t ret_w2 = 0;
+ uint32_t ret_w3 = 0;
+ uint32_t ret_fid = 0;
+
+ /*
+ * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
+ * requests.
+ */
+
+ cpu_spin_lock(&rxtx_spinlock);
+
+ s = get_frag_state(global_handle);
+ if (!s) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ mm = s->mm;
+ if (mm) {
+ if (flen > tee_mm_get_bytes(mm)) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+ page_count = s->share.page_count;
+ buf = (void *)tee_mm_get_smem(mm);
+ } else {
+ if (flen > rxtx_size) {
+ rc = FFA_INVALID_PARAMETERS;
+ goto out;
+ }
+ buf = rx_buf;
+ }
+
+ rc = add_mem_share_frag(s, buf, flen);
+out:
+ cpu_spin_unlock(&rxtx_spinlock);
+
+ if (rc <= 0 && mm) {
+ core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
+ tee_mm_free(mm);
+ }
+
+ if (rc < 0) {
+ ret_fid = FFA_ERROR;
+ ret_w2 = rc;
+ } else if (rc > 0) {
+ ret_fid = FFA_MEM_FRAG_RX;
+ ret_w3 = rc;
+ reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
+ } else {
+ ret_fid = FFA_SUCCESS_32;
+ reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
+ }
+
+ set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
+}
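+
+/*
+ * Illustrative message flow for a fragmented share, where the memory
+ * transaction descriptor doesn't fit in a single fragment:
+ *
+ * NW: FFA_MEM_SHARE(blen = total length, flen = first fragment)
+ * SPMC: FFA_MEM_FRAG_RX(handle, bytes consumed so far)
+ * NW: FFA_MEM_FRAG_TX(handle, flen = next fragment)
+ * ... repeated until all blen bytes have been transferred ...
+ * SPMC: FFA_SUCCESS(handle)
+ */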
+
+static void handle_mem_reclaim(struct thread_smc_args *args)
+{
+ uint32_t ret_val = FFA_INVALID_PARAMETERS;
+ uint32_t ret_fid = FFA_ERROR;
+ uint64_t cookie = 0;
+
+ if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
+ goto out;
+
+ cookie = reg_pair_to_64(args->a2, args->a1);
+ switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
+ case TEE_SUCCESS:
+ ret_fid = FFA_SUCCESS_32;
+ ret_val = 0;
+ break;
+ case TEE_ERROR_ITEM_NOT_FOUND:
+ DMSG("cookie %#"PRIx64" not found", cookie);
+ ret_val = FFA_INVALID_PARAMETERS;
+ break;
+ default:
+ DMSG("cookie %#"PRIx64" busy", cookie);
+ ret_val = FFA_DENIED;
+ break;
+ }
+out:
+ set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
+}
+
+/* Only called from assembly */
+void thread_spmc_msg_recv(struct thread_smc_args *args);
+void thread_spmc_msg_recv(struct thread_smc_args *args)
+{
+ assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
+ switch (args->a0) {
+ case FFA_VERSION:
+ handle_version(args);
+ break;
+ case FFA_FEATURES:
+ handle_features(args);
+ break;
+#ifdef ARM64
+ case FFA_RXTX_MAP_64:
+#endif
+ case FFA_RXTX_MAP_32:
+ handle_rxtx_map(args);
+ break;
+ case FFA_RXTX_UNMAP:
+ handle_rxtx_unmap(args);
+ break;
+ case FFA_RX_RELEASE:
+ handle_rx_release(args);
+ break;
+ case FFA_PARTITION_INFO_GET:
+ handle_partition_info_get(args);
+ break;
+ case FFA_INTERRUPT:
+ itr_core_handler();
+ set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
+ break;
+ case FFA_MSG_SEND_DIRECT_REQ_32:
+ if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
+ handle_yielding_call(args);
+ else
+ handle_blocking_call(args);
+ break;
+#ifdef ARM64
+ case FFA_MEM_SHARE_64:
+#endif
+ case FFA_MEM_SHARE_32:
+ handle_mem_share(args);
+ break;
+ case FFA_MEM_RECLAIM:
+ handle_mem_reclaim(args);
+ break;
+ case FFA_MEM_FRAG_TX:
+ handle_mem_frag_tx(args);
+ break;
+ default:
+ EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
+ set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+ }
+}
+
+static uint32_t yielding_call_with_arg(uint64_t cookie)
+{
+ uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
+ struct optee_msg_arg *arg = NULL;
+ struct mobj *mobj = NULL;
+ uint32_t num_params = 0;
+
+ mobj = mobj_ffa_get_by_cookie(cookie, 0);
+ if (!mobj) {
+ EMSG("Can't find cookie %#"PRIx64, cookie);
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ rv = mobj_inc_map(mobj);
+ if (rv)
+ goto out_put_mobj;
+
+ rv = TEE_ERROR_BAD_PARAMETERS;
+ arg = mobj_get_va(mobj, 0);
+ if (!arg)
+ goto out_dec_map;
+
+ if (!mobj_get_va(mobj, sizeof(*arg)))
+ goto out_dec_map;
+
+ num_params = READ_ONCE(arg->num_params);
+ if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
+ goto out_dec_map;
+
+ if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
+ goto out_dec_map;
+
+ rv = tee_entry_std(arg, num_params);
+
+out_dec_map:
+ mobj_dec_map(mobj);
+out_put_mobj:
+ mobj_put(mobj);
+ return rv;
+}
+
+static uint32_t yielding_unregister_shm(uint64_t cookie)
+{
+ uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
+
+ switch (res) {
+ case TEE_SUCCESS:
+ case TEE_ERROR_ITEM_NOT_FOUND:
+ return 0;
+ case TEE_ERROR_BUSY:
+ EMSG("res %#"PRIx32, res);
+ return FFA_BUSY;
+ default:
+ EMSG("res %#"PRIx32, res);
+ return FFA_INVALID_PARAMETERS;
+ }
+}
+
+/*
+ * Helper routine for the assembly function thread_std_smc_entry()
+ *
+ * Note: this function is weak just to make it possible to exclude it from
+ * the unpaged area.
+ */
+uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
+ uint32_t a2, uint32_t a3)
+{
+ /*
+ * Arguments are supplied from handle_yielding_call() as:
+ * a0 <- w1
+ * a1 <- w3
+ * a2 <- w4
+ * a3 <- w5
+ */
+ thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
+ switch (a1) {
+ case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
+ return yielding_call_with_arg(reg_pair_to_64(a3, a2));
+ case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
+ return FFA_NOT_SUPPORTED;
+ case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
+ return yielding_unregister_shm(reg_pair_to_64(a3, a2));
+ default:
+ return FFA_DENIED;
+ }
+}
+
+static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
+{
+ uint64_t offs = tpm->u.memref.offs;
+
+ param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
+ OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+
+ param->u.fmem.offs_low = offs;
+ param->u.fmem.offs_high = offs >> 32;
+ if (param->u.fmem.offs_high != offs >> 32)
+ return false;
+
+ param->u.fmem.size = tpm->u.memref.size;
+ if (tpm->u.memref.mobj) {
+ param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
+ if (!param->u.fmem.global_id)
+ return false;
+ } else {
+ param->u.fmem.global_id = 0;
+ }
+
+ return true;
+}
+
+static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct thread_rpc_arg rpc_arg = { .call = {
+ .w1 = thread_get_tsd()->rpc_target_info,
+ .w4 = type,
+ },
+ };
+
+ reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
+ mobj_put(mobj);
+ res = mobj_ffa_unregister_by_cookie(cookie);
+ if (res)
+ DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
+ cookie, res);
+ thread_rpc(&rpc_arg);
+}
+
+static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
+{
+ struct mobj *mobj = NULL;
+ unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
+ SMALL_PAGE_SIZE;
+ struct thread_rpc_arg rpc_arg = { .call = {
+ .w1 = thread_get_tsd()->rpc_target_info,
+ .w4 = type,
+ .w5 = page_count,
+ },
+ };
+ unsigned int internal_offset = 0;
+ uint64_t cookie = 0;
+
+ thread_rpc(&rpc_arg);
+
+ cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
+ if (!cookie)
+ return NULL;
+ internal_offset = rpc_arg.ret.w6;
+
+ mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
+ if (!mobj) {
+ DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
+ cookie, internal_offset);
+ return NULL;
+ }
+
+ assert(mobj_is_nonsec(mobj));
+
+ if (mobj_inc_map(mobj)) {
+ DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
+ mobj_put(mobj);
+ return NULL;
+ }
+
+ return mobj;
+}
+
+struct mobj *thread_rpc_alloc_payload(size_t size)
+{
+ return thread_rpc_alloc(size,
+ OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
+}
+
+void thread_rpc_free_payload(struct mobj *mobj)
+{
+ thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
+ mobj_get_cookie(mobj), mobj);
+}
+
+static struct mobj *thread_rpc_alloc_arg(size_t size)
+{
+ return thread_rpc_alloc(size,
+ OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
+}
+
+static void thread_rpc_free_arg(struct mobj *mobj)
+{
+ thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
+ mobj_get_cookie(mobj), mobj);
+}
+
+static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
+ struct thread_param *params,
+ struct optee_msg_arg **arg_ret,
+ uint64_t *carg_ret)
+{
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
+ struct thread_ctx *thr = threads + thread_get_id();
+ struct optee_msg_arg *arg = thr->rpc_arg;
+
+ if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ if (!arg) {
+ struct mobj *mobj = thread_rpc_alloc_arg(sz);
+
+ if (!mobj)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ arg = mobj_get_va(mobj, 0);
+ if (!arg) {
+ thread_rpc_free_arg(mobj);
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+
+ thr->rpc_arg = arg;
+ thr->rpc_mobj = mobj;
+ }
+
+ memset(arg, 0, sz);
+ arg->cmd = cmd;
+ arg->num_params = num_params;
+ arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
+
+ for (size_t n = 0; n < num_params; n++) {
+ switch (params[n].attr) {
+ case THREAD_PARAM_ATTR_NONE:
+ arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
+ break;
+ case THREAD_PARAM_ATTR_VALUE_IN:
+ case THREAD_PARAM_ATTR_VALUE_OUT:
+ case THREAD_PARAM_ATTR_VALUE_INOUT:
+ arg->params[n].attr = params[n].attr -
+ THREAD_PARAM_ATTR_VALUE_IN +
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ arg->params[n].u.value.a = params[n].u.value.a;
+ arg->params[n].u.value.b = params[n].u.value.b;
+ arg->params[n].u.value.c = params[n].u.value.c;
+ break;
+ case THREAD_PARAM_ATTR_MEMREF_IN:
+ case THREAD_PARAM_ATTR_MEMREF_OUT:
+ case THREAD_PARAM_ATTR_MEMREF_INOUT:
+ if (!set_fmem(arg->params + n, params + n))
+ return TEE_ERROR_BAD_PARAMETERS;
+ break;
+ default:
+ return TEE_ERROR_BAD_PARAMETERS;
+ }
+ }
+
+ *arg_ret = arg;
+ *carg_ret = mobj_get_cookie(thr->rpc_mobj);
+
+ return TEE_SUCCESS;
+}
+
+static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
+ struct thread_param *params)
+{
+ for (size_t n = 0; n < num_params; n++) {
+ switch (params[n].attr) {
+ case THREAD_PARAM_ATTR_VALUE_OUT:
+ case THREAD_PARAM_ATTR_VALUE_INOUT:
+ params[n].u.value.a = arg->params[n].u.value.a;
+ params[n].u.value.b = arg->params[n].u.value.b;
+ params[n].u.value.c = arg->params[n].u.value.c;
+ break;
+ case THREAD_PARAM_ATTR_MEMREF_OUT:
+ case THREAD_PARAM_ATTR_MEMREF_INOUT:
+ params[n].u.memref.size = arg->params[n].u.fmem.size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return arg->ret;
+}
+
+uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
+ struct thread_param *params)
+{
+ struct thread_rpc_arg rpc_arg = { .call = {
+ .w1 = thread_get_tsd()->rpc_target_info,
+ .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
+ },
+ };
+ uint64_t carg = 0;
+ struct optee_msg_arg *arg = NULL;
+ uint32_t ret = 0;
+
+ ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
+ if (ret)
+ return ret;
+
+ reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
+ thread_rpc(&rpc_arg);
+
+ return get_rpc_arg_res(arg, num_params, params);
+}
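+
+/*
+ * Illustrative only, not part of this patch: a typical caller of
+ * thread_rpc_cmd(), mirroring how tee_time_get_ree_time() asks normal
+ * world for the current time. OPTEE_RPC_CMD_GET_TIME and
+ * THREAD_PARAM_VALUE() come from optee_rpc_cmd.h and kernel/thread.h.
+ */
+#if 0
+static TEE_Result get_ree_time_example(uint32_t *sec, uint32_t *nsec)
+{
+ struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
+ uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p);
+
+ if (res)
+ return res;
+ *sec = p.u.value.a; /* seconds */
+ *nsec = p.u.value.b; /* nanoseconds */
+ return TEE_SUCCESS;
+}
+#endif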
diff --git a/core/arch/arm/kernel/thread_spmc_a32.S b/core/arch/arm/kernel/thread_spmc_a32.S
new file mode 100644
index 00000000..4d7a6b1a
--- /dev/null
+++ b/core/arch/arm/kernel/thread_spmc_a32.S
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#include <platform_config.h>
+
+#include <arm32_macros.S>
+#include <arm.h>
+#include <asm.S>
+#include <ffa.h>
+#include <generated/asm-defines.h>
+#include <keep.h>
+#include <kernel/thread_defs.h>
+#include <kernel/unwind.h>
+#include <optee_ffa.h>
+
+FUNC thread_ffa_msg_wait , :
+ mov_imm r0, FFA_MSG_WAIT /* FID */
+ mov r1, #FFA_TARGET_INFO_MBZ /* Target info MBZ */
+ mov r2, #FFA_PARAM_MBZ /* Param MBZ */
+ mov r3, #FFA_PARAM_MBZ /* Param MBZ */
+ mov r4, #FFA_PARAM_MBZ /* Param MBZ */
+ mov r5, #FFA_PARAM_MBZ /* Param MBZ */
+ mov r6, #FFA_PARAM_MBZ /* Param MBZ */
+ mov r7, #FFA_PARAM_MBZ /* Param MBZ */
+ b .ffa_msg_loop
+END_FUNC thread_ffa_msg_wait
+
+ /* Caller provides r1, r3-r7 params */
+LOCAL_FUNC ffa_msg_send_direct_resp , :
+ ldr r0, =FFA_MSG_SEND_DIRECT_RESP_32 /* FID */
+ mov r2, #FFA_PARAM_MBZ /* RES MBZ */
+
+.ffa_msg_loop:
+ /* Invoke SMC with caller provided parameters */
+ smc #0
+
+ /* Store the parameters as struct thread_smc_args on stack */
+ push {r0-r7}
+ mov r0, sp
+
+ /* parse and handle message */
+ bl thread_spmc_msg_recv
+
+ /* Load struct thread_smc_args into registers */
+ pop {r0-r7}
+ b .ffa_msg_loop
+END_FUNC ffa_msg_send_direct_resp
+
+FUNC thread_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
+
+ ror r4, r0, #16 /* Save target info with src and dst swapped */
+ bl __thread_std_smc_entry
+ mov r5, r0 /* Save return value */
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ cpsid aif
+ bl thread_get_tmp_sp
+ mov sp, r0
+
+ bl thread_state_free
+
+ mov r1, r4 /* Target info */
+ mov r3, r5 /* Return value */
+ mov r4, #FFA_PARAM_MBZ /* Unused parameter */
+ mov r5, #FFA_PARAM_MBZ /* Unused parameter */
+ mov r6, #FFA_PARAM_MBZ /* Unused parameter */
+ mov r7, #FFA_PARAM_MBZ /* Unused parameter */
+ b ffa_msg_send_direct_resp
+UNWIND( .fnend)
+END_FUNC thread_std_smc_entry
+
+/* void thread_rpc(struct thread_rpc_arg *rpc_arg) */
+FUNC thread_rpc , :
+UNWIND( .fnstart)
+ push {r0, lr}
+UNWIND( .save {r0, lr})
+
+ bl thread_save_state
+ mov r4, r0 /* Save original CPSR */
+
+ /*
+ * Switch to temporary stack and SVC mode. Save CPSR to resume into.
+ */
+ bl thread_get_tmp_sp
+ ldr r8, [sp] /* Get pointer to rpc_arg */
+ cps #CPSR_MODE_SVC /* Change to SVC mode */
+ mov sp, r0 /* Switch to tmp stack */
+
+ mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ mov r1, r4 /* CPSR to restore */
+ ldr r2, =.thread_rpc_return
+ bl thread_state_suspend
+ mov r7, r0 /* Supply thread index */
+ ldr r0, =FFA_MSG_SEND_DIRECT_RESP_32
+ mov r2, #FFA_PARAM_MBZ
+ mov r3, #0 /* Error code = 0 */
+ ldm r8, {r1, r4-r6} /* Load rpc_arg.call into r1, r4-r6 */
+ b ffa_msg_send_direct_resp
+
+.thread_rpc_return:
+ /*
+ * At this point the stack pointer has been restored to the value
+ * it had when thread_save_state() was called above.
+ *
+ * Jumps here from thread_resume() when the RPC has returned. The
+ * IRQ and FIQ bits are restored to what they were when this
+ * function was originally entered.
+ */
+ pop {r12, lr} /* Get pointer to rpc_arg */
+ stm r12, {r0-r3} /* Store r0-r3 into rpc_arg.ret */
+ bx lr
+UNWIND( .fnend)
+END_FUNC thread_rpc
+DECLARE_KEEP_PAGER thread_rpc
+
+/*
+ * void thread_foreign_intr_exit(uint32_t thread_index)
+ *
+ * This function is jumped to at the end of macro foreign_intr_handler().
+ * The current thread as indicated by @thread_index has just been
+ * suspended. The job here is just to inform normal world which thread
+ * id to resume when returning.
+ */
+FUNC thread_foreign_intr_exit , :
+ /* load threads[r0].tsd.rpc_target_info into r1 */
+ mov r1, #THREAD_CTX_SIZE
+ ldr r2, =threads
+ mla r1, r1, r0, r2
+ ldr r1, [r1, #THREAD_CTX_TSD_RPC_TARGET_INFO]
+ mov r2, #FFA_PARAM_MBZ
+ mov r3, #FFA_PARAM_MBZ
+ mov r4, #OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT
+ mov r5, #FFA_PARAM_MBZ
+ mov r6, #FFA_PARAM_MBZ
+ mov r7, r0
+ b ffa_msg_send_direct_resp
+END_FUNC thread_foreign_intr_exit
diff --git a/core/arch/arm/kernel/thread_spmc_a64.S b/core/arch/arm/kernel/thread_spmc_a64.S
new file mode 100644
index 00000000..3d57da4d
--- /dev/null
+++ b/core/arch/arm/kernel/thread_spmc_a64.S
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ * Copyright (c) 2019, Arm Limited
+ */
+
+#include <platform_config.h>
+
+#include <arm64_macros.S>
+#include <arm.h>
+#include <asm.S>
+#include <ffa.h>
+#include <generated/asm-defines.h>
+#include <keep.h>
+#include <kernel/thread_defs.h>
+#include <optee_ffa.h>
+
+FUNC thread_ffa_msg_wait , :
+ mov_imm x0, FFA_MSG_WAIT /* FID */
+ mov x1, #FFA_TARGET_INFO_MBZ /* Target info MBZ */
+ mov x2, #FFA_PARAM_MBZ /* Param MBZ */
+ mov x3, #FFA_PARAM_MBZ /* Param MBZ */
+ mov x4, #FFA_PARAM_MBZ /* Param MBZ */
+ mov x5, #FFA_PARAM_MBZ /* Param MBZ */
+ mov x6, #FFA_PARAM_MBZ /* Param MBZ */
+ mov x7, #FFA_PARAM_MBZ /* Param MBZ */
+ b .ffa_msg_loop
+END_FUNC thread_ffa_msg_wait
+
+ /* Caller provides x1, x3-x7 params */
+LOCAL_FUNC ffa_msg_send_direct_resp , :
+ mov_imm x0, FFA_MSG_SEND_DIRECT_RESP_32 /* FID */
+ mov x2, #FFA_PARAM_MBZ /* RES MBZ */
+
+.ffa_msg_loop:
+ /* Invoke SMC with caller provided parameters */
+ smc #0
+
+ /* Store the parameters as struct thread_smc_args on stack */
+ sub sp, sp, #THREAD_SMC_ARGS_SIZE
+ store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ mov x0, sp
+
+ /* parse and handle message */
+ bl thread_spmc_msg_recv
+
+ /* Load struct thread_smc_args into registers */
+ load_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
+ add sp, sp, #THREAD_SMC_ARGS_SIZE
+ b .ffa_msg_loop
+END_FUNC ffa_msg_send_direct_resp
+
+FUNC thread_std_smc_entry , :
+ ror w19, w0, #16 /* Save target info with src and dst swapped */
+ bl __thread_std_smc_entry
+ mov w20, w0 /* Save return value */
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ bl thread_get_tmp_sp
+ mov sp, x0
+
+ bl thread_state_free
+
+ mov w1, w19 /* Target info */
+ mov w3, w20 /* Return value */
+ mov x4, #FFA_PARAM_MBZ /* Unused parameter */
+ mov x5, #FFA_PARAM_MBZ /* Unused parameter */
+ mov x6, #FFA_PARAM_MBZ /* Unused parameter */
+ mov x7, #FFA_PARAM_MBZ /* Unused parameter */
+ b ffa_msg_send_direct_resp
+END_FUNC thread_std_smc_entry
+
+/* void thread_rpc(struct thread_rpc_arg *rpc_arg) */
+FUNC thread_rpc , :
+ /* Read daif and create an SPSR */
+ mrs x1, daif
+ orr x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
+
+ /* Mask all maskable exceptions before switching to temporary stack */
+ msr daifset, #DAIFBIT_ALL
+ push x0, xzr
+ push x1, x30
+ bl thread_get_ctx_regs
+ ldr x30, [sp, #8]
+ store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
+ mov x19, x0
+
+ bl thread_get_tmp_sp
+ pop x1, xzr /* Match "push x1, x30" above */
+ mov x2, sp
+ str x2, [x19, #THREAD_CTX_REGS_SP]
+ ldr x20, [sp] /* Get pointer to rpc_arg */
+ mov sp, x0 /* Switch to tmp stack */
+ /*
+ * We need to read rpc_arg early, because thread_state_suspend()
+ * can invoke virt_unset_guest() which will unmap the pages where
+ * rpc_arg resides.
+ */
+ load_wregs x20, 0, 21, 24 /* Load rpc_arg.call into w21-w24 */
+
+ adr x2, .thread_rpc_return
+ mov w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ bl thread_state_suspend
+ mov w7, w0 /* Supply thread index */
+ ldr w0, =FFA_MSG_SEND_DIRECT_RESP_32
+ mov w1, w21
+ mov w2, #FFA_PARAM_MBZ
+ mov w3, #0 /* Error code = 0 */
+ mov w4, w22
+ mov w5, w23
+ mov w6, w24
+ b ffa_msg_send_direct_resp
+
+.thread_rpc_return:
+ /*
+ * At this point the stack pointer has been restored to the value
+ * stored in THREAD_CTX_REGS_SP above.
+ *
+ * Jumps here from thread_resume() when the RPC has returned. The
+ * IRQ and FIQ bits are restored to what they were when this
+ * function was originally entered. w0-w3 hold the values supplied
+ * to thread_resume_from_rpc() in a0-a3.
+ */
+ pop x16, xzr /* Get pointer to rpc_arg */
+ store_wregs x16, 0, 0, 3 /* Store w0-w3 into rpc_arg.ret */
+ ret
+END_FUNC thread_rpc
+DECLARE_KEEP_PAGER thread_rpc
+
+/*
+ * void thread_foreign_intr_exit(uint32_t thread_index)
+ *
+ * This function is jumped to at the end of macro foreign_intr_handler().
+ * The current thread as indicated by @thread_index has just been
+ * suspended. The job here is just to inform normal world which thread
+ * id to resume when returning.
+ */
+FUNC thread_foreign_intr_exit , :
+ /* load threads[w0].tsd.rpc_target_info into w1 */
+ mov x1, #THREAD_CTX_SIZE
+ adr x2, threads
+ madd x1, x1, x0, x2
+ ldr w1, [x1, #THREAD_CTX_TSD_RPC_TARGET_INFO]
+ mov x2, #FFA_PARAM_MBZ
+ mov w3, #FFA_PARAM_MBZ
+ mov w4, #OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT
+ mov x5, #FFA_PARAM_MBZ
+ mov w6, #FFA_PARAM_MBZ
+ mov w7, w0
+ b ffa_msg_send_direct_resp
+END_FUNC thread_foreign_intr_exit
diff --git a/core/arch/arm/plat-vexpress/sub.mk b/core/arch/arm/plat-vexpress/sub.mk
index 9b1dd7b0..09d341c7 100644
--- a/core/arch/arm/plat-vexpress/sub.mk
+++ b/core/arch/arm/plat-vexpress/sub.mk
@@ -5,3 +5,11 @@ srcs-$(CFG_ARM32_core) += juno_core_pos_a32.S
srcs-$(CFG_ARM64_core) += juno_core_pos_a64.S
endif
srcs-$(CFG_WITH_USER_TA) += vendor_props.c
+ifeq ($(CFG_CORE_FFA),y)
+ifeq ($(PLATFORM_FLAVOR_fvp),y)
+srcs-$(CFG_ARM64_core) += fvp_spmc_pm.c
+endif
+ifeq ($(PLATFORM_FLAVOR_qemu_armv8a),y)
+srcs-y += fvp_spmc_pm.c
+endif
+endif
diff --git a/core/arch/arm/tee/entry_std.c b/core/arch/arm/tee/entry_std.c
index 5c1e8c20..424c4c67 100644
--- a/core/arch/arm/tee/entry_std.c
+++ b/core/arch/arm/tee/entry_std.c
@@ -59,6 +59,40 @@ static bool __maybe_unused param_mem_from_mobj(struct param_mem *mem,
return true;
}
+#ifdef CFG_CORE_FFA
+static TEE_Result set_fmem_param(const struct optee_msg_param_fmem *fmem,
+ struct param_mem *mem)
+{
+ size_t req_size = 0;
+ uint64_t global_id = READ_ONCE(fmem->global_id);
+ size_t sz = READ_ONCE(fmem->size);
+
+ if (!global_id && !sz) {
+ mem->mobj = NULL;
+ mem->offs = 0;
+ mem->size = 0;
+ return TEE_SUCCESS;
+ }
+ mem->mobj = mobj_ffa_get_by_cookie(global_id,
+ READ_ONCE(fmem->internal_offs));
+ if (!mem->mobj)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ mem->offs = reg_pair_to_64(READ_ONCE(fmem->offs_high),
+ READ_ONCE(fmem->offs_low));
+ mem->size = sz;
+
+ /*
+ * Check that the supplied offset and size are covered by the
+ * previously verified MOBJ.
+ */
+ if (ADD_OVERFLOW(mem->offs, mem->size, &req_size) ||
+ mem->mobj->size < req_size)
+ return TEE_ERROR_SECURITY;
+
+ return TEE_SUCCESS;
+}
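+
+/*
+ * This is the decoding side of the OPTEE_MSG_ATTR_TYPE_FMEM_* parameter
+ * format: global_id is the FF-A cookie of a previously shared memory
+ * object and (offs, size) select a window within it. The encoding side,
+ * used for RPC toward normal world, is set_fmem() in thread_spmc.c.
+ */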
+#else /*!CFG_CORE_FFA*/
/* fill 'struct param_mem' structure if buffer matches a valid memory object */
static TEE_Result set_tmem_param(const struct optee_msg_param_tmem *tmem,
uint32_t attr, struct param_mem *mem)
@@ -112,13 +146,14 @@ static TEE_Result set_rmem_param(const struct optee_msg_param_rmem *rmem,
{
size_t req_size = 0;
uint64_t shm_ref = READ_ONCE(rmem->shm_ref);
+ size_t sz = READ_ONCE(rmem->size);
mem->mobj = mobj_reg_shm_get_by_cookie(shm_ref);
if (!mem->mobj)
return TEE_ERROR_BAD_PARAMETERS;
mem->offs = READ_ONCE(rmem->offs);
- mem->size = READ_ONCE(rmem->size);
+ mem->size = sz;
/*
* Check that the supplied offset and size is covered by the
@@ -130,7 +165,8 @@ static TEE_Result set_rmem_param(const struct optee_msg_param_rmem *rmem,
return TEE_SUCCESS;
}
-#endif
+#endif /*CFG_CORE_DYN_SHM*/
+#endif /*!CFG_CORE_FFA*/
static TEE_Result copy_in_params(const struct optee_msg_param *params,
uint32_t num_params,
@@ -167,6 +203,18 @@ static TEE_Result copy_in_params(const struct optee_msg_param *params,
ta_param->u[n].val.a = READ_ONCE(params[n].u.value.a);
ta_param->u[n].val.b = READ_ONCE(params[n].u.value.b);
break;
+#ifdef CFG_CORE_FFA
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
+ res = set_fmem_param(&params[n].u.fmem,
+ &ta_param->u[n].mem);
+ if (res)
+ return res;
+ pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr -
+ OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+ break;
+#else /*!CFG_CORE_FFA*/
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
@@ -188,7 +236,8 @@ static TEE_Result copy_in_params(const struct optee_msg_param *params,
pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr -
OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
break;
-#endif
+#endif /*CFG_CORE_DYN_SHM*/
+#endif /*!CFG_CORE_FFA*/
default:
return TEE_ERROR_BAD_PARAMETERS;
}
@@ -428,6 +477,7 @@ out:
arg->ret_origin = err_orig;
}
+#ifndef CFG_CORE_FFA
#ifdef CFG_CORE_DYN_SHM
static void register_shm(struct optee_msg_arg *arg, uint32_t num_params)
{
@@ -465,6 +515,7 @@ static void unregister_shm(struct optee_msg_arg *arg, uint32_t num_params)
}
}
#endif /*CFG_CORE_DYN_SHM*/
+#endif
void nsec_sessions_list_head(struct tee_ta_session_head **open_sessions)
{
@@ -500,6 +551,7 @@ uint32_t __tee_entry_std(struct optee_msg_arg *arg, uint32_t num_params)
case OPTEE_MSG_CMD_CANCEL:
entry_cancel(arg, num_params);
break;
+#ifndef CFG_CORE_FFA
#ifdef CFG_CORE_DYN_SHM
case OPTEE_MSG_CMD_REGISTER_SHM:
register_shm(arg, num_params);
@@ -508,6 +560,7 @@ uint32_t __tee_entry_std(struct optee_msg_arg *arg, uint32_t num_params)
unregister_shm(arg, num_params);
break;
#endif
+#endif
default:
EMSG("Unknown cmd 0x%x", arg->cmd);
rv = OPTEE_SMC_RETURN_EBADCMD;
diff --git a/core/arch/arm/tee/sub.mk b/core/arch/arm/tee/sub.mk
index 80589bc7..e27e11c9 100644
--- a/core/arch/arm/tee/sub.mk
+++ b/core/arch/arm/tee/sub.mk
@@ -5,6 +5,8 @@ srcs-$(CFG_CACHE_API) += svc_cache.c
srcs-y += arch_svc.c
endif
srcs-y += entry_std.c
+ifneq ($(CFG_CORE_FFA),y)
srcs-y += entry_fast.c
cppflags-entry_fast.c-y += -DTEE_IMPL_GIT_SHA1=$(TEE_IMPL_GIT_SHA1)
+endif
srcs-y += cache.c
diff --git a/core/kernel/sub.mk b/core/kernel/sub.mk
index a4ae7479..8f16ef7e 100644
--- a/core/kernel/sub.mk
+++ b/core/kernel/sub.mk
@@ -7,7 +7,9 @@ srcs-y += pm.c
srcs-y += handle.c
srcs-y += interrupt.c
srcs-$(CFG_LOCKDEP) += lockdep.c
+ifneq ($(CFG_CORE_FFA),y)
srcs-$(CFG_CORE_DYN_SHM) += msg_param.c
+endif
srcs-y += panic.c
srcs-y += refcount.c
srcs-y += tee_misc.c