author     Jens Wiklander <jens.wiklander@linaro.org>  2020-06-02 17:58:29 +0200
committer  Jérôme Forissier <jerome@forissier.org>     2020-07-20 13:21:16 +0200
commit     73e1d3f398b023562b93e81b7fbe4e8897f3bd7f (patch)
tree       42674fa7ebe5a711ea37311bfe4c5c559befdd8f
parent     c13d4bd7d4d863445689a1e6a2ee1e2ba2e4cfce (diff)
core: add mobj_ffa
Adds a new mobj, mobj_ffa, tailored to handle shared memory
registrations over FF-A.

Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r--  core/arch/arm/include/mm/mobj.h |  57
-rw-r--r--  core/arch/arm/mm/mobj_ffa.c     | 561
-rw-r--r--  core/arch/arm/mm/sub.mk         |   3
3 files changed, 602 insertions(+), 19 deletions(-)
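Read in isolation, the new API below can be hard to follow, so here is a
minimal sketch of one full share/use/reclaim round trip as an S-EL1 SPMC
might drive it. It is assembled only from the declarations this patch adds
plus the pre-existing mobj_put(); the wrapper function itself, its name,
parameters and error handling are invented for illustration:

#include <mm/mobj.h>

/*
 * Hypothetical illustration (not part of this patch): one full
 * share/use/reclaim cycle through the new mobj_ffa API.
 */
static TEE_Result example_ffa_share_cycle(paddr_t pa, unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	struct mobj *mobj = NULL;
	unsigned int idx = 0;
	uint64_t cookie = 0;

	/* FFA_MEM_SHARE: allocate a descriptor and record the pages */
	mf = mobj_ffa_sel1_spmc_new(num_pages);
	if (!mf)
		return TEE_ERROR_OUT_OF_MEMORY;
	if (mobj_ffa_add_pages_at(mf, &idx, pa, num_pages)) {
		mobj_ffa_sel1_spmc_delete(mf);
		return TEE_ERROR_BAD_PARAMETERS;
	}
	/* Not referenced yet: park it on the inactive list */
	cookie = mobj_ffa_push_to_inactive(mf);

	/* Normal world later passes the cookie with a TA invocation */
	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj)
		return TEE_ERROR_ITEM_NOT_FOUND;
	/* ... map and use the memory ... */
	mobj_put(mobj);		/* refcount 0 => back to inactive list */

	/* Normal world unregisters, then issues FFA_MEM_RECLAIM */
	if (mobj_ffa_unregister_by_cookie(cookie))
		return TEE_ERROR_BUSY;
	return mobj_ffa_sel1_spmc_reclaim(cookie);
}

Note the ordering: reclaim only succeeds once the mobj is back on the
inactive list and has been unregistered.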
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
index 2a80aa9a..ba37a30d 100644
--- a/core/arch/arm/include/mm/mobj.h
+++ b/core/arch/arm/include/mm/mobj.h
@@ -162,25 +162,7 @@ struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
enum buf_is_attr battr);
-#ifdef CFG_CORE_DYN_SHM
-/* reg_shm represents TEE shared memory */
-struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
- paddr_t page_offset, uint64_t cookie);
-
-/**
- * mobj_reg_shm_get_by_cookie() - get a MOBJ based on cookie
- * @cookie: Cookie used by normal world when supplying the shared memory
- *
- * Searches for a registered shared memory MOBJ and if one with a matching
- * @cookie is found its reference counter is increased before returning
- * the MOBJ.
- *
- * Returns a valid pointer on success or NULL on failure.
- */
-struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie);
-
-TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);
-
+#if defined(CFG_CORE_FFA) || defined(CFG_CORE_DYN_SHM)
/**
* mobj_inc_map() - increase map count
* @mobj: pointer to a registered shared memory MOBJ
@@ -204,6 +186,43 @@ TEE_Result mobj_inc_map(struct mobj *mobj);
* Returns TEE_SUCCESS on success or an error code on failure
*/
TEE_Result mobj_dec_map(struct mobj *mobj);
+#endif
+
+#if defined(CFG_CORE_FFA)
+struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
+ unsigned int internal_offs);
+
+TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie);
+
+/* Functions for SPMC */
+#ifdef CFG_CORE_SEL1_SPMC
+struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages);
+void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mobj);
+TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie);
+#endif
+uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mobj);
+TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mobj, unsigned int *idx,
+ paddr_t pa, unsigned int num_pages);
+uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mobj);
+
+#elif defined(CFG_CORE_DYN_SHM)
+/* reg_shm represents TEE shared memory */
+struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
+ paddr_t page_offset, uint64_t cookie);
+
+/**
+ * mobj_reg_shm_get_by_cookie() - get a MOBJ based on cookie
+ * @cookie: Cookie used by normal world when supplying the shared memory
+ *
+ * Searches for a registered shared memory MOBJ and if one with a matching
+ * @cookie is found its reference counter is increased before returning
+ * the MOBJ.
+ *
+ * Returns a valid pointer on success or NULL on failure.
+ */
+struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie);
+
+TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);
/**
* mobj_reg_shm_unguard() - unguards a reg_shm
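Since mobj_inc_map()/mobj_dec_map() now serve both the FF-A and the legacy
dynamic-SHM backends, their intended pairing deserves a quick sketch. The
helper below is hypothetical (its name, the omitted bounds checks and the
use of mobj_get_va() at this offset are assumptions, not part of the patch):

#include <mm/mobj.h>
#include <string.h>

/*
 * Hypothetical helper: keep a registered SHM mobj mapped while
 * copying out of it, then drop the mapping. mobj_inc_map() creates
 * the mapping on the 0 -> 1 transition, mobj_dec_map() tears it
 * down on 1 -> 0, so nested callers are cheap.
 */
static TEE_Result example_copy_from_shm(struct mobj *mobj, size_t offs,
					void *dst, size_t len)
{
	TEE_Result res = mobj_inc_map(mobj);
	void *va = NULL;

	if (res)
		return res;

	va = mobj_get_va(mobj, offs);
	if (va)
		memcpy(dst, va, len);
	else
		res = TEE_ERROR_GENERIC;

	mobj_dec_map(mobj);
	return res;
}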
diff --git a/core/arch/arm/mm/mobj_ffa.c b/core/arch/arm/mm/mobj_ffa.c
new file mode 100644
index 00000000..1f3ef53b
--- /dev/null
+++ b/core/arch/arm/mm/mobj_ffa.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2016-2020, Linaro Limited
+ */
+
+#include <assert.h>
+#include <bitstring.h>
+#include <initcall.h>
+#include <keep.h>
+#include <kernel/refcount.h>
+#include <kernel/spinlock.h>
+#include <mm/mobj.h>
+#include <sys/queue.h>
+
+struct mobj_ffa {
+ struct mobj mobj;
+ SLIST_ENTRY(mobj_ffa) link;
+ uint64_t cookie;
+ tee_mm_entry_t *mm;
+ struct refcount mapcount;
+ uint16_t page_offset;
+ bool registered_by_cookie;
+ bool unregistered_by_cookie;
+ paddr_t pages[];
+};
+
+SLIST_HEAD(mobj_ffa_head, mobj_ffa);
+
+#ifdef CFG_CORE_SEL1_SPMC
+#define NUM_SHMS 64
+static bitstr_t bit_decl(shm_bits, NUM_SHMS);
+#endif
+
+static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
+static struct mobj_ffa_head shm_inactive_head =
+ SLIST_HEAD_INITIALIZER(shm_inactive_head);
+
+static unsigned int shm_lock = SPINLOCK_UNLOCK;
+
+static const struct mobj_ops mobj_ffa_ops __rodata_unpaged;
+
+static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_ffa_ops);
+ return container_of(mobj, struct mobj_ffa, mobj);
+}
+
+static struct mobj_ffa *to_mobj_ffa_may_fail(struct mobj *mobj)
+{
+ if (mobj && mobj->ops != &mobj_ffa_ops)
+ return NULL;
+
+ return container_of(mobj, struct mobj_ffa, mobj);
+}
+
+static size_t shm_size(size_t num_pages)
+{
+ size_t s = 0;
+
+ if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
+ return 0;
+ if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
+ return 0;
+ return s;
+}
+
+static struct mobj_ffa *ffa_new(unsigned int num_pages)
+{
+ struct mobj_ffa *mf = NULL;
+ size_t s = 0;
+
+ if (!num_pages)
+ return NULL;
+
+ s = shm_size(num_pages);
+ if (!s)
+ return NULL;
+ mf = calloc(1, s);
+ if (!mf)
+ return NULL;
+
+ mf->mobj.ops = &mobj_ffa_ops;
+ mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
+ mf->mobj.phys_granule = SMALL_PAGE_SIZE;
+ refcount_set(&mf->mobj.refc, 0);
+
+ return mf;
+}
+
+#ifdef CFG_CORE_SEL1_SPMC
+struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
+{
+ struct mobj_ffa *mf = NULL;
+ uint32_t exceptions = 0;
+ int i = 0;
+
+ mf = ffa_new(num_pages);
+ if (!mf)
+ return NULL;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ bit_ffc(shm_bits, NUM_SHMS, &i);
+ if (i != -1) {
+ bit_set(shm_bits, i);
+ /*
+ * + 1 to avoid a cookie value of 0, and set bit 44 so that one
+ * of the upper 32 bits is exercised too, for testing.
+ */
+ mf->cookie = (i + 1) | BIT64(44);
+ }
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ if (i == -1) {
+ free(mf);
+ return NULL;
+ }
+
+ return mf;
+}
+#endif /*CFG_CORE_SEL1_SPMC*/
+
+static size_t get_page_count(struct mobj_ffa *mf)
+{
+ return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
+}
+
+static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
+{
+ return mf->cookie == cookie;
+}
+
+static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
+{
+ return mf == (void *)(vaddr_t)ptr;
+}
+
+static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
+ bool (*cmp_func)(struct mobj_ffa *mf,
+ uint64_t val),
+ uint64_t val)
+{
+ struct mobj_ffa *mf = SLIST_FIRST(head);
+ struct mobj_ffa *p = NULL;
+
+ if (!mf)
+ return NULL;
+
+ if (cmp_func(mf, val)) {
+ SLIST_REMOVE_HEAD(head, link);
+ return mf;
+ }
+
+ while (true) {
+ p = SLIST_NEXT(mf, link);
+ if (!p)
+ return NULL;
+ if (cmp_func(p, val)) {
+ SLIST_REMOVE_AFTER(mf, link);
+ return p;
+ }
+ mf = p;
+ }
+}
+
+static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
+ bool (*cmp_func)(struct mobj_ffa *mf,
+ uint64_t val),
+ uint64_t val)
+{
+ struct mobj_ffa *mf = NULL;
+
+ SLIST_FOREACH(mf, head, link)
+ if (cmp_func(mf, val))
+ return mf;
+
+ return NULL;
+}
+
+#ifdef CFG_CORE_SEL1_SPMC
+void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
+{
+ int i = (mf->cookie - 1) & ~BIT64(44);
+ uint32_t exceptions = 0;
+
+ assert(i >= 0 && i < NUM_SHMS);
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ assert(bit_test(shm_bits, i));
+ bit_clear(shm_bits, i);
+ assert(!mf->mm);
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ free(mf);
+}
+#endif /*CFG_CORE_SEL1_SPMC*/
+
+TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
+ paddr_t pa, unsigned int num_pages)
+{
+ unsigned int n = 0;
+ size_t tot_page_count = get_page_count(mf);
+
+ if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ for (n = 0; n < num_pages; n++)
+ mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
+
+ (*idx) += n;
+ return TEE_SUCCESS;
+}
+
+uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
+{
+ return mf->cookie;
+}
+
+uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
+{
+ uint32_t exceptions = 0;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
+ assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
+ assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
+ SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ return mf->cookie;
+}
+
+static void unmap_helper(struct mobj_ffa *mf)
+{
+ if (mf->mm) {
+ core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
+ get_page_count(mf));
+ tee_mm_free(mf->mm);
+ mf->mm = NULL;
+ }
+}
+
+TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct mobj_ffa *mf = NULL;
+ uint32_t exceptions = 0;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ mf = find_in_list(&shm_head, cmp_cookie, cookie);
+ /*
+ * If the mobj is found here, it's still active and cannot be
+ * unregistered.
+ */
+ if (mf) {
+ DMSG("cookie %#"PRIx64" busy refc %u",
+ cookie, refcount_val(&mf->mobj.refc));
+ res = TEE_ERROR_BUSY;
+ goto out;
+ }
+ mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
+ /*
+ * Fail if the mobj isn't found or was already unregistered.
+ */
+ if (!mf || mf->unregistered_by_cookie) {
+ res = TEE_ERROR_ITEM_NOT_FOUND;
+ goto out;
+ }
+ mf->unregistered_by_cookie = true;
+ res = TEE_SUCCESS;
+
+out:
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+ return res;
+}
+
+#ifdef CFG_CORE_SEL1_SPMC
+TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct mobj_ffa *mf = NULL;
+ uint32_t exceptions = 0;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ mf = find_in_list(&shm_head, cmp_cookie, cookie);
+ /*
+ * If the mobj is found here, it's still active and cannot be
+ * reclaimed.
+ */
+ if (mf) {
+ DMSG("cookie %#"PRIx64" busy refc %u",
+ cookie, refcount_val(&mf->mobj.refc));
+ res = TEE_ERROR_BUSY;
+ goto out;
+ }
+
+ mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
+ if (!mf) {
+ res = TEE_ERROR_ITEM_NOT_FOUND;
+ goto out;
+ }
+ /*
+ * Busy if the mobj has been registered via mobj_ffa_get_by_cookie()
+ * but not yet unregistered with mobj_ffa_unregister_by_cookie().
+ */
+ if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
+ DMSG("cookie %#"PRIx64" busy", cookie);
+ res = TEE_ERROR_BUSY;
+ goto out;
+ }
+
+ if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
+ panic();
+ res = TEE_SUCCESS;
+out:
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+ if (!res)
+ mobj_ffa_sel1_spmc_delete(mf);
+ return res;
+}
+#endif /*CFG_CORE_SEL1_SPMC*/
+
+struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie, unsigned int internal_offs)
+{
+ struct mobj_ffa *mf = NULL;
+ uint32_t exceptions = 0;
+
+ if (internal_offs >= SMALL_PAGE_SIZE)
+ return NULL;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+
+ mf = find_in_list(&shm_head, cmp_cookie, cookie);
+ if (mf) {
+ if (mf->page_offset == internal_offs) {
+ if (!refcount_inc(&mf->mobj.refc)) {
+ /*
+ * The refcount was 0: some other thread called
+ * mobj_put(), the refcount reached 0, and we
+ * found the mobj before ffa_inactivate() got
+ * the lock. Let's reinitialize (resurrect) it.
+ */
+ refcount_set(&mf->mobj.refc, 1);
+ }
+ DMSG("cookie %#"PRIx64" active: refc %d",
+ cookie, refcount_val(&mf->mobj.refc));
+ } else {
+ EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
+ cookie, mf->page_offset, internal_offs);
+ mf = NULL;
+ }
+ } else {
+ mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
+ if (mf) {
+ mf->unregistered_by_cookie = false;
+ mf->registered_by_cookie = true;
+ assert(refcount_val(&mf->mobj.refc) == 0);
+ refcount_set(&mf->mobj.refc, 1);
+ refcount_set(&mf->mapcount, 0);
+ mf->mobj.size += mf->page_offset;
+ assert(!(mf->mobj.size & SMALL_PAGE_MASK));
+ mf->mobj.size -= internal_offs;
+ mf->page_offset = internal_offs;
+ SLIST_INSERT_HEAD(&shm_head, mf, link);
+ }
+ }
+
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ if (!mf) {
+ EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
+ cookie, internal_offs);
+ return NULL;
+ }
+
+ return &mf->mobj;
+}
+
+static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
+ size_t granule, paddr_t *pa)
+{
+ struct mobj_ffa *mf = to_mobj_ffa(mobj);
+ size_t full_offset = 0;
+ paddr_t p = 0;
+
+ if (!pa)
+ return TEE_ERROR_GENERIC;
+
+ if (offset >= mobj->size)
+ return TEE_ERROR_GENERIC;
+
+ full_offset = offset + mf->page_offset;
+ switch (granule) {
+ case 0:
+ p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
+ (full_offset & SMALL_PAGE_MASK);
+ break;
+ case SMALL_PAGE_SIZE:
+ p = mf->pages[full_offset / SMALL_PAGE_SIZE];
+ break;
+ default:
+ return TEE_ERROR_GENERIC;
+ }
+ *pa = p;
+
+ return TEE_SUCCESS;
+}
+DECLARE_KEEP_PAGER(ffa_get_pa);
+
+static size_t ffa_get_phys_offs(struct mobj *mobj,
+ size_t granule __maybe_unused)
+{
+ assert(granule >= mobj->phys_granule);
+
+ return to_mobj_ffa(mobj)->page_offset;
+}
+
+static void *ffa_get_va(struct mobj *mobj, size_t offset)
+{
+ struct mobj_ffa *mf = to_mobj_ffa(mobj);
+
+ if (!mf->mm || offset >= mobj->size)
+ return NULL;
+
+ return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
+}
+
+static void ffa_inactivate(struct mobj *mobj)
+{
+ struct mobj_ffa *mf = to_mobj_ffa(mobj);
+ uint32_t exceptions = 0;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ /*
+ * If the refcount isn't 0, some other thread found this mobj in
+ * shm_head after the mobj_put() that brought us here, but before
+ * we got the lock, and resurrected it.
+ */
+ if (refcount_val(&mobj->refc)) {
+ DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
+ goto out;
+ }
+
+ DMSG("cookie %#"PRIx64, mf->cookie);
+ if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
+ panic();
+ unmap_helper(mf);
+ SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
+out:
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+}
+
+static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
+{
+ if (!cattr)
+ return TEE_ERROR_GENERIC;
+
+ *cattr = TEE_MATTR_CACHE_CACHED;
+
+ return TEE_SUCCESS;
+}
+
+static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
+{
+ assert(mobj->ops == &mobj_ffa_ops);
+
+ return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
+}
+
+static uint64_t ffa_get_cookie(struct mobj *mobj)
+{
+ return to_mobj_ffa(mobj)->cookie;
+}
+
+static const struct mobj_ops mobj_ffa_ops __rodata_unpaged = {
+ .get_pa = ffa_get_pa,
+ .get_phys_offs = ffa_get_phys_offs,
+ .get_va = ffa_get_va,
+ .get_cattr = ffa_get_cattr,
+ .matches = ffa_matches,
+ .free = ffa_inactivate,
+ .get_cookie = ffa_get_cookie,
+};
+
+TEE_Result mobj_inc_map(struct mobj *mobj)
+{
+ TEE_Result res = TEE_SUCCESS;
+ uint32_t exceptions = 0;
+ struct mobj_ffa *mf = to_mobj_ffa_may_fail(mobj);
+
+ if (!mf)
+ return TEE_ERROR_GENERIC;
+
+ if (refcount_inc(&mf->mapcount))
+ return TEE_SUCCESS;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+
+ if (refcount_val(&mf->mapcount))
+ goto out;
+
+ mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
+ if (!mf->mm) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
+ get_page_count(mf), MEM_AREA_NSEC_SHM);
+ if (res) {
+ tee_mm_free(mf->mm);
+ mf->mm = NULL;
+ goto out;
+ }
+
+ refcount_set(&mf->mapcount, 1);
+out:
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ return res;
+}
+
+TEE_Result mobj_dec_map(struct mobj *mobj)
+{
+ struct mobj_ffa *mf = to_mobj_ffa_may_fail(mobj);
+ uint32_t exceptions = 0;
+
+ if (!mf)
+ return TEE_ERROR_GENERIC;
+
+ if (!refcount_dec(&mf->mapcount))
+ return TEE_SUCCESS;
+
+ exceptions = cpu_spin_lock_xsave(&shm_lock);
+ unmap_helper(mf);
+ cpu_spin_unlock_xrestore(&shm_lock, exceptions);
+
+ return TEE_SUCCESS;
+}
+
+static TEE_Result mapped_shm_init(void)
+{
+ vaddr_t pool_start = 0;
+ vaddr_t pool_end = 0;
+
+ core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
+ if (!pool_start || !pool_end)
+ panic("Can't find region for shmem pool");
+
+ if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS))
+ panic("Could not create shmem pool");
+
+ DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
+ pool_start, pool_end);
+ return TEE_SUCCESS;
+}
+
+service_init(mapped_shm_init);
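The cookie encoding used by the SEL1 SPMC allocator above is easy to check
in isolation: mobj_ffa_sel1_spmc_new() encodes bitmap index i as
(i + 1) | BIT64(44), and mobj_ffa_sel1_spmc_delete() inverts it with
(cookie - 1) & ~BIT64(44). A standalone, host-side test of that round trip
(hypothetical; BIT64 and NUM_SHMS are redefined locally here):

#include <assert.h>
#include <stdint.h>

#define BIT64(n)	(UINT64_C(1) << (n))
#define NUM_SHMS	64

int main(void)
{
	for (int i = 0; i < NUM_SHMS; i++) {
		/* encode as in mobj_ffa_sel1_spmc_new() */
		uint64_t cookie = (uint64_t)(i + 1) | BIT64(44);

		assert(cookie);		/* 0 stays reserved */
		/* decode as in mobj_ffa_sel1_spmc_delete() */
		assert((int)((cookie - 1) & ~BIT64(44)) == i);
	}
	return 0;
}

Since i + 1 is always nonzero in the low bits, the subtraction never
borrows from bit 44, so the decode recovers i exactly.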
diff --git a/core/arch/arm/mm/sub.mk b/core/arch/arm/mm/sub.mk
index 39d9c1f5..1b0862df 100644
--- a/core/arch/arm/mm/sub.mk
+++ b/core/arch/arm/mm/sub.mk
@@ -9,7 +9,10 @@ endif
srcs-y += tee_mm.c
srcs-y += pgt_cache.c
srcs-y += mobj.c
+srcs-$(CFG_CORE_FFA) += mobj_ffa.c
+ifneq ($(CFG_CORE_FFA),y)
srcs-$(CFG_CORE_DYN_SHM) += mobj_dyn_shm.c
+endif
ifeq ($(CFG_SYSCALL_FTRACE),y)
# We would not like to profile MMU APIs as these are used to switch TA