aboutsummaryrefslogtreecommitdiff
path: root/core/arch/arm32
diff options
context:
space:
mode:
authorJens Wiklander <jens.wiklander@linaro.org>2014-12-04 14:30:32 +0100
committerJens Wiklander <jens.wiklander@linaro.org>2014-12-16 07:52:41 +0100
commit7de955b38ae4c4a572843f3a2893514d6cd57e6b (patch)
tree01ff7d8b520757c4a52cb2ef409eb5b33d5e160d /core/arch/arm32
parent8f7de3fc92dbdfdc217b83403c64abc0bafb5d31 (diff)
arm32: paging of TEE Core optionally enabled
plat-vexpress-*: * Optionally enable paging with CFG_WITH_PAGER=y * Uses fake SRAM when paging is enabled * Supports partitioning OP-TEE binary in unpaged, init and pagable areas plat-stm: * Displays an error message if compiled with CFG_WITH_PAGER=y arm32: * Replaces legacy paging support with new paging support * Removes unused tee_pager_unpg.c Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> Tested-by: Jens Wiklander <jens.wiklander@linaro.org> (QEMU, FVP, Juno) Reviewed-by: Pascal Brand <pascal.brand@linaro.org> Tested-by: Pascal Brand <pascal.brand@linaro.org> (STM platform - Check the code without the pager is not broken).
Diffstat (limited to 'core/arch/arm32')
-rw-r--r--core/arch/arm32/include/mm/tee_mm_def.h38
-rw-r--r--core/arch/arm32/include/mm/tee_pager.h55
-rw-r--r--core/arch/arm32/kernel/tee_ta_manager.c6
-rw-r--r--core/arch/arm32/mm/tee_mmu.c3
-rw-r--r--core/arch/arm32/mm/tee_pager.c701
-rw-r--r--core/arch/arm32/mm/tee_pager_unpg.c419
-rw-r--r--core/arch/arm32/plat-stm/platform_config.h6
-rw-r--r--core/arch/arm32/plat-vexpress/core_bootcfg.c11
-rw-r--r--core/arch/arm32/plat-vexpress/entry.S32
-rw-r--r--core/arch/arm32/plat-vexpress/link.mk1
-rw-r--r--core/arch/arm32/plat-vexpress/main.c179
-rw-r--r--core/arch/arm32/plat-vexpress/platform_config.h58
12 files changed, 753 insertions, 756 deletions
diff --git a/core/arch/arm32/include/mm/tee_mm_def.h b/core/arch/arm32/include/mm/tee_mm_def.h
index 6046f70..996e6ac 100644
--- a/core/arch/arm32/include/mm/tee_mm_def.h
+++ b/core/arch/arm32/include/mm/tee_mm_def.h
@@ -36,17 +36,6 @@
#define SECTION_MASK 0x000fffff
#define SECTION_SIZE 0x00100000
-#define TEE_VMEM_START 0x40000000
-#define TEE_VMEM_SIZE (1024 * 1024)
-
-/* virtual addresses of ROM code variables and L2 MMU tables */
-#define SEC_VIRT_MMU_L2_BASE 0x40000000
-
-/* Paged virtual memory defines */
-#define TEE_PVMEM_PSIZE (TEE_VMEM_SIZE / SMALL_PAGE_SIZE)
-
-#define TEE_PVMEM_LO TEE_VMEM_START
-
/* define section to load */
#define TEE_DDR_VLOFFSET 0x1
@@ -56,8 +45,6 @@
/*
* MMU related values
*/
-#define TEE_VIRT_MMU_L2_BASE TEE_VMEM_START
-#define TEE_VIRT_MMU_L2_SIZE 0x400
#define TEE_MMU_UL1_BASE core_mmu_get_ta_ul1_va()
#define TEE_MMU_UL1_PA_BASE core_mmu_get_ta_ul1_pa()
@@ -82,31 +69,6 @@
#define TEE_PAGER_NO_ACCESS_ATTRIBUTES 0x00000000
-#define TEE_ROM_AREA_START TEE_VMEM_START
-#define TEE_ROM_AREA_SIZE 0x2000
-
-#define TEE_HOLE_START (TEE_ROM_AREA_START + TEE_ROM_AREA_SIZE)
-#define TEE_HOLE_SIZE 0x2000
-
-/* Has to be kept in sync with elf_arm.x */
-#define TEE_STACK_AREA_START (TEE_HOLE_START + TEE_HOLE_SIZE)
-/* Stack is not physically contigious. */
-#define TEE_STACK_AREA_START0 TEE_STACK_AREA_START
-#define TEE_STACK_AREA_SIZE0 0x3000
-#define TEE_STACK_AREA_SIZE TEE_STACK_AREA_SIZE0
-
-/* Has to be kept in sync with elf_arm.x */
-#define TEE_CODE_START (TEE_STACK_AREA_START + TEE_STACK_AREA_SIZE)
-#define TEE_CODE_SIZE 0xA000
-
-#define TEE_HEAP_START (TEE_CODE_START + TEE_CODE_SIZE)
-/*
- * This address has to be 16kb aligned as the first few bytes are
- * used to hold the L1 mmu descriptor for user mode mapping.
- */
-#define TEE_HEAP_START0 TEE_HEAP_START
-
-
/*
* Register addresses related to time
* RTT = Real-Time Timer
diff --git a/core/arch/arm32/include/mm/tee_pager.h b/core/arch/arm32/include/mm/tee_pager.h
index ccb1bba..d8fdf79 100644
--- a/core/arch/arm32/include/mm/tee_pager.h
+++ b/core/arch/arm32/include/mm/tee_pager.h
@@ -29,9 +29,60 @@
#define MM_TEE_PAGER_H
#include <kernel/thread.h>
+#include <mm/tee_mm_unpg.h>
+
+/* Read-only mapping */
+#define TEE_PAGER_AREA_RO (1 << 0)
+/*
+ * Read/write mapping, pages will only be reused after explicit release of
+ * the pages. A partial area can be released for instance when shrinking a
+ * stack.
+ */
+#define TEE_PAGER_AREA_RW (1 << 1)
+/* Executable mapping */
+#define TEE_PAGER_AREA_X (1 << 2)
+
+/*
+ * tee_pager_add_area() - Adds a pagable area
+ * @mm: covered memory area
+ * @flags: describes attributes of mapping
+ * @store: backing store for the memory area
+ * @hashes: hashes of the pages in the backing store
+ *
+ * Exactly one of TEE_PAGER_AREA_RO and TEE_PAGER_AREA_RW has to be supplied in
+ * flags.
+ *
+ * If TEE_PAGER_AREA_X is supplied the area will be mapped as executable,
+ * currently only supported together with TEE_PAGER_AREA_RO.
+ *
+ * TEE_PAGER_AREA_RO requires store and hashes to be !NULL while
+ * TEE_PAGER_AREA_RW requires store and hashes to be NULL, pages will only
+ * be reused after explicit release of the pages. A partial area can be
+ * released for instance when releasing unused parts of a stack.
+ *
+ * Invalid use of flags will cause a panic.
+ *
+ * Return true on success or false if area can't be added.
+ */
+bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
+ const void *hashes);
+
+/*
+ * tee_pager_init() - Initializes the pager
+ * @xlat_table: Address of translation table mapping the region covered
+ * by tee_mm_vcore
+ * @mm: Memory region with paging activated, should be allocated
+ * from tee_mm_vcore
+ * @store: Address of backing store of the paged region
+ * @hashes: Hashes for the pages in the backing store
+ *
+ * The pager will use tee_mm_vcore.lo as virtual base address for the
+ * translation table.
+ */
+void tee_pager_init(void *xlat_table);
void tee_pager_abort_handler(uint32_t abort_type,
- struct thread_abort_regs *regs);
+ struct thread_abort_regs *regs);
/*
* Adds physical pages to the pager to use. The supplied virtual address range
@@ -40,7 +91,7 @@ void tee_pager_abort_handler(uint32_t abort_type,
* vaddr is the first virtual address
* npages is the number of pages to add
*/
-void tee_pager_add_pages(vaddr_t vaddr, size_t npages);
+void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap);
void tee_pager_unhide_all_pages(void);
diff --git a/core/arch/arm32/kernel/tee_ta_manager.c b/core/arch/arm32/kernel/tee_ta_manager.c
index fc6cdb7..57d7663 100644
--- a/core/arch/arm32/kernel/tee_ta_manager.c
+++ b/core/arch/arm32/kernel/tee_ta_manager.c
@@ -262,7 +262,7 @@ static void tee_ta_init_got(struct tee_ta_ctx *const ctx)
while (ptr < end_ptr) {
*ptr += va_start;
#ifdef PAGER_DEBUG_PRINT
- DMSG("GOT [0x%x] = 0x%x", ptr, *ptr);
+ DMSG("GOT [%p] = 0x%x", (void *)ptr, *ptr);
#endif
ptr++;
}
@@ -299,7 +299,7 @@ static void tee_ta_init_reldyn(struct tee_ta_ctx *const ctx)
data = (uint32_t *)(ctx->load_addr + rel_dyn->addr);
*data += ctx->load_addr;
#ifdef PAGER_DEBUG_PRINT
- DMSG("rel.dyn [0x%x] = 0x%x", data, *data);
+ DMSG("rel.dyn [%p] = 0x%x", (void *)data, *data);
#endif
}
}
@@ -326,7 +326,7 @@ static void tee_ta_init_heap(struct tee_ta_ctx *const ctx, size_t heap_size)
*data = heap_start_addr;
#ifdef PAGER_DEBUG_PRINT
- DMSG("heap_base [0x%x] = 0x%x", data, *data);
+ DMSG("heap_base [%p] = 0x%x", (void *)data, *data);
#endif
}
diff --git a/core/arch/arm32/mm/tee_mmu.c b/core/arch/arm32/mm/tee_mmu.c
index 196131b..0d2d986 100644
--- a/core/arch/arm32/mm/tee_mmu.c
+++ b/core/arch/arm32/mm/tee_mmu.c
@@ -116,9 +116,6 @@
#define TEE_MMU_SECTION_NOCACHE \
TEE_MMU_SECTION_TEX(1)
-#define TEE_MMU_KL2_ENTRY(page_num) \
- (*(uint32_t *)(SEC_VIRT_MMU_L2_BASE + ((uint32_t)(page_num)) * 4))
-
#define TEE_MMU_UL1_ENTRY(page_num) \
(*(uint32_t *)(TEE_MMU_UL1_BASE + ((uint32_t)(page_num)) * 4))
diff --git a/core/arch/arm32/mm/tee_pager.c b/core/arch/arm32/mm/tee_pager.c
index 2a9061c..55e9846 100644
--- a/core/arch/arm32/mm/tee_pager.c
+++ b/core/arch/arm32/mm/tee_pager.c
@@ -25,14 +25,15 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <generated/conf.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
+#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
-#include <trace.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
@@ -42,29 +43,49 @@
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm32.h>
+#include <tee/tee_cryp_provider.h>
+#include <tee_api_defines.h>
+#include <utee_defines.h>
+#include <trace.h>
/* Interesting aborts for TEE pager */
-#define TEE_FSR_FS_MASK 0x040F
-#define TEE_FSR_FS_ALIGNMENT_FAULT 0x0001 /* DFSR[10,3:0] 0b00001 */
-#define TEE_FSR_FS_DEBUG_EVENT 0x0002 /* DFSR[10,3:0] 0b00010 */
-#define TEE_FSR_FS_ASYNC_EXTERNAL_ABORT 0x0406 /* DFSR[10,3:0] 0b10110 */
-#define TEE_FSR_FS_PERMISSION_FAULT_SECTION 0x000D /* DFSR[10,3:0] 0b01101 */
-#define TEE_FSR_FS_PERMISSION_FAULT_PAGE 0x000F /* DFSR[10,3:0] 0b01111 */
-
-#define TEE_PAGER_NORMAL_RETURN 0
-#define TEE_PAGER_USER_TA_PANIC 1
-
-#define TEE_PAGER_SPSR_MODE_MASK 0x1F
-#define TEE_PAGER_SPSR_MODE_USR 0x10
-#define TEE_PAGER_SPSR_MODE_SVC 0x13
-#define TEE_PAGER_SPSR_MODE_ABT 0x17
-#define TEE_PAGER_SPSR_MODE_MON 0x16
+#define TEE_PAGER_FSR_FS_MASK 0x040F
+ /* DFSR[10,3:0] 0b00001 */
+#define TEE_PAGER_FSR_FS_ALIGNMENT_FAULT 0x0001
+ /* DFSR[10,3:0] 0b00010 */
+#define TEE_PAGER_FSR_FS_DEBUG_EVENT 0x0002
+ /* DFSR[10,3:0] 0b10110 */
+#define TEE_PAGER_FSR_FS_ASYNC_EXTERNAL_ABORT 0x0406
+ /* DFSR[10,3:0] 0b01101 */
+#define TEE_PAGER_FSR_FS_PERMISSION_FAULT_SECTION 0x000D
+ /* DFSR[10,3:0] 0b01111 */
+#define TEE_PAGER_FSR_FS_PERMISSION_FAULT_PAGE 0x000F
+
+struct tee_pager_abort_info {
+ uint32_t abort_type;
+ uint32_t fsr;
+ vaddr_t va;
+ uint32_t pc;
+ struct thread_abort_regs *regs;
+};
-#define TEE_PAGER_DATA_ABORT 0x00000000
-#define TEE_PAGER_PREF_ABORT 0x00000001
-#define TEE_PAGER_UNDEF_ABORT 0x00000002
+enum tee_pager_fault_type {
+ TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
+ TEE_PAGER_FAULT_TYPE_PAGABLE,
+ TEE_PAGER_FAULT_TYPE_IGNORE,
+};
+#ifdef CFG_WITH_PAGER
+struct tee_pager_area {
+ const uint8_t *hashes;
+ const uint8_t *store;
+ uint32_t flags;
+ tee_mm_entry_t *mm;
+ TAILQ_ENTRY(tee_pager_area) link;
+};
+static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
/*
* Represents a physical page used for paging.
@@ -78,343 +99,428 @@
*/
struct tee_pager_pmem {
uint32_t *mmu_entry;
- void *ctx_handle;
+ struct tee_pager_area *area;
TAILQ_ENTRY(tee_pager_pmem) link;
};
/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
+
static struct tee_pager_pmem_head tee_pager_pmem_head =
-TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
+ TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
+
+static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
+ TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);
/* number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
+/* Number of registered physical pages, used hiding pages. */
+static size_t tee_pager_npages;
-/* Get VA from L2 MMU entry address */
-#define TEE_PAGER_GET_VA(a) \
- (((((uint32_t)a) - SEC_VIRT_MMU_L2_BASE) << \
- (SMALL_PAGE_SHIFT - 2)) + TEE_VMEM_START)
+/*
+ * Pointer to L2 translation table used to map the virtual memory range
+ * covered by the pager.
+ */
+static uint32_t *l2_table;
-/* Number of registered physical pages, used hiding pages. */
-static uint8_t tee_pager_npages;
+bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
+ const void *hashes)
+{
+ struct tee_pager_area *area;
+
+ DMSG("0x%x - 0x%x : flags 0x%x, store %p, hashes %p",
+ tee_mm_get_smem(mm),
+ tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
+ flags, store, hashes);
+
+ if (flags & TEE_PAGER_AREA_RO)
+ TEE_ASSERT(store && hashes);
+ else if (flags & TEE_PAGER_AREA_RW)
+ TEE_ASSERT(!store && !hashes);
+ else
+ panic();
-/* Get L2 MMU entry address from virtual address */
-static uint32_t *tee_pager_get_mmu_entry(tee_vaddr_t va)
+ area = malloc(sizeof(struct tee_pager_area));
+ if (!area)
+ return false;
+ area->mm = mm;
+ area->flags = flags;
+ area->store = store;
+ area->hashes = hashes;
+ TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
+ return true;
+}
+
+static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
- tee_vaddr_t addr = va & ~SMALL_PAGE_MASK;
- size_t mmu_entry_offset = (addr - TEE_VMEM_START) >> SMALL_PAGE_SHIFT;
+ struct tee_pager_area *area;
+
+ TAILQ_FOREACH(area, &tee_pager_area_head, link) {
+ tee_mm_entry_t *mm = area->mm;
+ size_t offset = (va - mm->pool->lo) >> mm->pool->shift;
- return (uint32_t *)(TEE_VIRT_MMU_L2_BASE +
- mmu_entry_offset * sizeof(uint32_t));
+ if (offset >= mm->offset && offset < (mm->offset + mm->size))
+ return area;
+ }
+ return NULL;
}
-/* Returns true if the exception originated from user mode */
-static bool tee_pager_is_user_exception(void)
+void tee_pager_init(void *xlat_table)
{
- return (read_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
- TEE_PAGER_SPSR_MODE_USR;
+ l2_table = xlat_table;
}
-/* Returns true if the exception originated from abort mode */
-static bool tee_pager_is_abort_in_abort_handler(void)
+
+/* Get L2 translation entry address from virtual address */
+static uint32_t *tee_pager_va_to_xe(vaddr_t va)
{
- return (read_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
- TEE_PAGER_SPSR_MODE_ABT;
+ vaddr_t page_va = va & ~SMALL_PAGE_MASK;
+ size_t mmu_entry_offset = (page_va - tee_mm_vcore.lo) >>
+ SMALL_PAGE_SHIFT;
+
+ return l2_table + mmu_entry_offset;
}
-static void tee_pager_print_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused, const uint32_t pc __unused,
- const uint32_t flags __unused, const uint32_t dbgpcsr __unused)
+/* Get virtual address of page from translation entry */
+static vaddr_t tee_pager_xe_to_va(uint32_t *xe)
{
- DMSG("%s at 0x%x: FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr, fsr, pc, read_ttbr0(), read_contextidr());
- DMSG("CPUID %dd DBGPCSR 0x%x SPSR_abt 0x%x",
- read_mpidr(), dbgpcsr, read_spsr());
+ return (vaddr_t)(xe - l2_table) * SMALL_PAGE_SIZE + tee_mm_vcore.lo;
}
-static void tee_pager_print_error_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused, const uint32_t pc __unused,
- const uint32_t flags __unused, const uint32_t dbgpcsr __unused)
+static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
- EMSG("%s at 0x%x\n"
- "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
- "CPUID 0x%x DBGPCSR 0x%x CPSR 0x%x (read from SPSR)",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr, fsr, pc, read_ttbr0(), read_contextidr(),
- read_mpidr(), dbgpcsr, read_spsr());
+ size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
+
+ if (area->store) {
+ size_t rel_pg_idx = pg_idx - area->mm->offset;
+ const void *stored_page = area->store +
+ rel_pg_idx * SMALL_PAGE_SIZE;
+
+ memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
+ } else {
+ memset((void *)page_va, 0, SMALL_PAGE_SIZE);
+ }
}
-static void tee_pager_restore_irq(void)
+static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
- /*
- * Restores the settings of IRQ as saved when entering secure
- * world, using something like
- * INTERRUPT_ENABLE(SEC_ENV_SETTINGS_READ() & SEC_ROM_IRQ_ENABLE_MASK);
- */
+ size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
+
+ if (area->store) {
+ size_t rel_pg_idx = pg_idx - area->mm->offset;
+ const void *hash = area->hashes +
+ rel_pg_idx * TEE_SHA256_HASH_SIZE;
- /* Infinite loop as this is not implemented yet */
- volatile bool mytrue = true;
- EMSG("tee_pager_restore_irq not implemented yet");
- while (mytrue)
- ;
+ if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
+ TEE_SUCCESS) {
+ EMSG("PH 0x%x failed", page_va);
+ panic();
+ }
+ }
}
-static void tee_pager_print_user_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused,
- const uint32_t pc __unused,
- const uint32_t flags __unused,
- const uint32_t dbgpcsr __unused,
- struct thread_abort_regs *regs __unused)
+static bool tee_pager_unhide_page(vaddr_t page_va)
{
- EMSG_RAW("\nUser TA %s at address 0x%x\n",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr);
+ struct tee_pager_pmem *pmem;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (((*pmem->mmu_entry & SMALL_PAGE_MASK) ==
+ TEE_PAGER_PAGE_UNLOADED) &&
+ page_va == tee_pager_xe_to_va(pmem->mmu_entry)) {
+ /* page is hidden, show and move to back */
+ *pmem->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+
+ /* TODO only invalidate entry touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void tee_pager_hide_pages(void)
+{
+ struct tee_pager_pmem *pmem;
+ size_t n = 0;
+
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (n >= TEE_PAGER_NHIDE)
+ break;
+ n++;
+ *pmem->mmu_entry = TEE_MMU_L2SP_CLEAR_ACC(*pmem->mmu_entry);
+ }
+
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+#endif /*CFG_WITH_PAGER*/
+
+/* Returns true if the exception originated from user mode */
+static bool tee_pager_is_user_exception(void)
+{
+ return (read_spsr() & CPSR_MODE_MASK) == CPSR_MODE_USR;
+}
+
+/* Returns true if the exception originated from abort mode */
+static bool tee_pager_is_abort_in_abort_handler(void)
+{
+ return (read_spsr() & CPSR_MODE_MASK) == CPSR_MODE_ABT;
+}
+
+static __unused const char *abort_type_to_str(uint32_t abort_type)
+{
+ if (abort_type == THREAD_ABORT_DATA)
+ return "data";
+ if (abort_type == THREAD_ABORT_PREFETCH)
+ return "prefetch";
+ return "undef";
+}
+
+static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
+{
+ EMSG_RAW("\nUser TA %s-abort at address 0x%x\n",
+ abort_type_to_str(ai->abort_type), ai->va);
EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
- fsr, read_ttbr0(), read_ttbr1(), read_contextidr());
- EMSG_RAW(" cpu #%d cpsr 0x%08x (0x%08x)\n",
- get_core_pos(), read_spsr(), dbgpcsr);
+ ai->fsr, read_ttbr0(), read_ttbr1(), read_contextidr());
+ EMSG_RAW(" cpu #%d cpsr 0x%08x\n",
+ get_core_pos(), read_spsr());
EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
- regs->r0, regs->r4, regs->r8, regs->ip);
+ ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
- regs->r1, regs->r5, regs->r9, read_usr_sp());
+ ai->regs->r1, ai->regs->r5, ai->regs->r9, read_usr_sp());
EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
- regs->r2, regs->r6, regs->r10, read_usr_lr());
+ ai->regs->r2, ai->regs->r6, ai->regs->r10, read_usr_lr());
EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
- regs->r3, regs->r7, regs->r11, pc);
+ ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
tee_ta_dump_current();
}
-static uint32_t tee_pager_handle_abort(const uint32_t flags, const uint32_t pc,
- const uint32_t dbgpcsr,
- struct thread_abort_regs *regs)
+static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
- struct tee_pager_pmem *apage;
- uint32_t addr;
- uint32_t w_addr;
- uint32_t i;
- uint32_t fsr;
+ DMSG("%s-abort at 0x%x: FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fsr, ai->pc, read_ttbr0(), read_contextidr());
+ DMSG("CPUID 0x%x SPSR_abt 0x%x",
+ read_mpidr(), read_spsr());
+}
+
+static void tee_pager_print_error_abort(
+ struct tee_pager_abort_info *ai __unused)
+{
+ EMSG("%s-abort at 0x%x\n"
+ "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
+ "CPUID 0x%x CPSR 0x%x (read from SPSR)",
+ abort_type_to_str(ai->abort_type),
+ ai->va, ai->fsr, ai->pc, read_ttbr0(), read_contextidr(),
+ read_mpidr(), read_spsr());
+}
- if (flags == TEE_PAGER_DATA_ABORT) {
- fsr = read_dfsr();
- addr = read_dfar();
- } else {
- if (flags == TEE_PAGER_PREF_ABORT) {
- fsr = read_ifsr();
- addr = read_ifar();
- } else {
- fsr = 0;
- addr = pc;
- }
- }
- w_addr = addr;
+
+static enum tee_pager_fault_type tee_pager_get_fault_type(
+ struct tee_pager_abort_info *ai)
+{
/* In case of multithreaded version, this section must be protected */
if (tee_pager_is_user_exception()) {
- tee_pager_print_user_abort(addr, fsr, pc, flags, dbgpcsr, regs);
+ tee_pager_print_user_abort(ai);
DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
- return TEE_PAGER_USER_TA_PANIC;
+ return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
}
if (tee_pager_is_abort_in_abort_handler()) {
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("[TEE_PAGER] abort in abort handler (trap CPU)");
+ tee_pager_print_error_abort(ai);
+ EMSG("[PAGER] abort in abort handler (trap CPU)");
panic();
}
- if (flags == TEE_PAGER_UNDEF_ABORT) {
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ if (ai->abort_type == THREAD_ABORT_UNDEF) {
+ tee_pager_print_error_abort(ai);
EMSG("[TEE_PAGER] undefined abort (trap CPU)");
panic();
}
- switch (fsr & TEE_FSR_FS_MASK) {
- case TEE_FSR_FS_ALIGNMENT_FAULT: /* Only possible for data abort */
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ switch (ai->fsr & TEE_PAGER_FSR_FS_MASK) {
+ /* Only possible for data abort */
+ case TEE_PAGER_FSR_FS_ALIGNMENT_FAULT:
+ tee_pager_print_error_abort(ai);
EMSG("[TEE_PAGER] alignement fault! (trap CPU)");
panic();
- case TEE_FSR_FS_DEBUG_EVENT:
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+ case TEE_PAGER_FSR_FS_DEBUG_EVENT:
+ tee_pager_print_abort(ai);
DMSG("[TEE_PAGER] Ignoring debug event!");
- return TEE_PAGER_NORMAL_RETURN;
+ return TEE_PAGER_FAULT_TYPE_IGNORE;
- case TEE_FSR_FS_ASYNC_EXTERNAL_ABORT: /* Only possible for data abort */
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+ /* Only possible for data abort */
+ case TEE_PAGER_FSR_FS_ASYNC_EXTERNAL_ABORT:
+ tee_pager_print_abort(ai);
DMSG("[TEE_PAGER] Ignoring async external abort!");
- return TEE_PAGER_NORMAL_RETURN;
+ return TEE_PAGER_FAULT_TYPE_IGNORE;
default:
-#ifdef PAGER_DEBUG_PRINT
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
-#endif
break;
}
+ return TEE_PAGER_FAULT_TYPE_PAGABLE;
+}
-#ifndef CFG_TEE_PAGER
- /*
- * Until PAGER is supported, trap CPU here.
- */
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("Unexpected page fault! Trap CPU");
- while (1)
- ;
-#endif
- /* check if the access is valid */
- if (!tee_mm_validate(&tee_mm_vcore, w_addr)) {
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
- DMSG("Invalid addr 0x%" PRIx32, addr);
- panic();
- }
+#ifdef CFG_WITH_PAGER
- /* check if page is hidden */
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if (((*apage->mmu_entry & 0xFFF) == TEE_PAGER_PAGE_UNLOADED) &&
- apage->ctx_handle != NULL &&
- w_addr >= TEE_PAGER_GET_VA(apage->mmu_entry) &&
- w_addr <
- TEE_PAGER_GET_VA(apage->mmu_entry) + SMALL_PAGE_SIZE) {
- /* page is hidden, show and move to back */
- *(apage->mmu_entry) |= TEE_MMU_L2SP_PRIV_ACC;
+/* Finds the oldest page and remaps it for the new virtual address */
+static struct tee_pager_pmem *tee_pager_get_page(
+ struct tee_pager_abort_info *ai,
+ struct tee_pager_area *area)
+{
+ vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
- TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
- TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+ uint32_t pa;
+ uint32_t *mmu_entry = tee_pager_va_to_xe(page_va);
+ struct tee_pager_pmem *pmem;
- w_addr = 0;
- break;
+ if (*mmu_entry != 0) {
+ /*
+ * There's a pmem entry using this mmu entry, let's use
+ * that entry in the new mapping.
+ */
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->mmu_entry == mmu_entry)
+ break;
}
- }
-
- if (apage == NULL) {
- /* the page wasn't hidden */
- uint32_t pa;
- uint32_t *mmu_entry =
- (uint32_t *)tee_pager_get_mmu_entry((tee_vaddr_t) w_addr);
-
- if (*mmu_entry != 0) {
- /*
- * There's an pmem entry using this mmu entry, let's use
- * that entry in the new mapping.
- */
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if (apage->mmu_entry == mmu_entry)
- break;
- }
- if (apage == NULL) {
- tee_pager_print_abort(addr, fsr, pc, flags,
- dbgpcsr);
- DMSG("Couldn't find pmem for mmu_entry %p",
- (void *)mmu_entry);
- panic();
- }
- } else {
- apage = TAILQ_FIRST(&tee_pager_pmem_head);
- if (apage == NULL) {
- tee_pager_print_abort(addr, fsr, pc, flags,
- dbgpcsr);
- DMSG("No pmem entries");
- panic();
- }
+ if (!pmem) {
+ tee_pager_print_abort(ai);
+ DMSG("Couldn't find pmem for mmu_entry %p",
+ (void *)mmu_entry);
+ panic();
}
+ } else {
+ pmem = TAILQ_FIRST(&tee_pager_pmem_head);
+ if (!pmem) {
+ tee_pager_print_abort(ai);
+ DMSG("No pmem entries");
+ panic();
+ }
+ }
- /* save rw data if needed */
- if ((*apage->mmu_entry & 0xFFF) != 0 &&
- tee_ta_check_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
- apage->ctx_handle)) {
- /* make sure the page is accessible */
- if (((*apage->mmu_entry & 0xFFF) ==
- TEE_PAGER_PAGE_UNLOADED)) {
- *apage->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
-
- /* Invalidate secure TLB */
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
- }
+ /* add page to mmu table, small pages [31:12]PA */
+ pa = *pmem->mmu_entry & ~SMALL_PAGE_MASK;
- tee_ta_save_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
- apage->ctx_handle);
- }
+ *pmem->mmu_entry = 0;
+ pmem->mmu_entry = mmu_entry;
+ *pmem->mmu_entry = pa | TEE_PAGER_PAGE_LOADED;
+ TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
+ if (area->store) {
/* move page to back */
- TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
- TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
+ } else {
+ /* Move page to rw list */
+ TEE_ASSERT(tee_pager_npages > 0);
+ tee_pager_npages--;
+ TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
+ }
- /* add page to mmu table, small pages [31:12]PA */
- pa = *apage->mmu_entry & 0xFFFFF000;
+ /* TODO only invalidate entries touched above */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+#ifdef TEE_PAGER_DEBUG_PRINT
+ DMSG("Mapped 0x%x -> 0x%x", page_va, pa);
+#endif
- *apage->mmu_entry = 0;
- apage->mmu_entry = mmu_entry;
+ return pmem;
+}
- *apage->mmu_entry = pa | TEE_PAGER_PAGE_LOADED;
+static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
+{
+ struct tee_pager_area *area;
+ vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
-#ifdef PAGER_DEBUG_PRINT
- DMSG("Mapped %p -> %p", w_addr & 0xFFFFF000, pa);
+#ifdef TEE_PAGER_DEBUG_PRINT
+ tee_pager_print_abort(ai);
#endif
- }
- /* Hide */
- {
- struct tee_pager_pmem *bpage;
-
- i = 0;
- TAILQ_FOREACH(bpage, &tee_pager_pmem_head, link) {
- if (i >= TEE_PAGER_NHIDE)
- break;
- i++;
- *bpage->mmu_entry =
- TEE_MMU_L2SP_CLEAR_ACC(*bpage->mmu_entry);
- }
+ /* check if the access is valid */
+ area = tee_pager_find_area(ai->va);
+ if (!area) {
+ tee_pager_print_abort(ai);
+ DMSG("Invalid addr 0x%" PRIx32, ai->va);
+ panic();
}
- /* Invalidate secure TLB */
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
-
- if (w_addr) {
- /* load page code & data */
- apage->ctx_handle = tee_ta_load_page(w_addr);
+ if (!tee_pager_unhide_page(page_va)) {
+ /* the page wasn't hidden */
+ struct tee_pager_pmem *pmem;
- cache_maintenance_l1(DCACHE_AREA_CLEAN,
- (void *)(w_addr & 0xFFFFF000), SMALL_PAGE_SIZE);
+ pmem = tee_pager_get_page(ai, area);
- cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
- (void *)(w_addr & 0xFFFFF000), SMALL_PAGE_SIZE);
+ /* load page code & data */
+ pmem->area = area;
+ tee_pager_load_page(area, page_va);
+ /* TODO remap readonly if TEE_PAGER_AREA_RO */
+ tee_pager_verify_page(area, page_va);
+ /* TODO remap executable if TEE_PAGER_AREA_X */
+
+ if (area->flags & TEE_PAGER_AREA_X) {
+ cache_maintenance_l1(DCACHE_AREA_CLEAN,
+ (void *)page_va, SMALL_PAGE_SIZE);
+
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
+ (void *)page_va, SMALL_PAGE_SIZE);
+ }
}
+ tee_pager_hide_pages();
/* end protect (multithreded version) */
+}
+
+#else /*CFG_WITH_PAGER*/
+static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
+{
/*
- * Until now we've been running with IRQ blocked. Let's enble IRQ now
- * when it should be safe to do further processing with them enabled.
- *
- * It should be possible to enable IRQ earlier, but MMU updates and
- * cache mainentance may need some tweaking to guarentee coherency in
- * case we switch CPU in the middle of an operation.
+ * Until PAGER is supported, trap CPU here.
*/
- tee_pager_restore_irq();
-
- return TEE_PAGER_NORMAL_RETURN;
+ tee_pager_print_error_abort(ai);
+ EMSG("Unexpected page fault! Trap CPU");
+ panic();
}
+#endif /*CFG_WITH_PAGER*/
+
void tee_pager_abort_handler(uint32_t abort_type,
- struct thread_abort_regs *regs)
+ struct thread_abort_regs *regs)
{
- static const uint32_t abort_type_to_flags[] = {
- TEE_PAGER_UNDEF_ABORT,
- TEE_PAGER_PREF_ABORT,
- TEE_PAGER_DATA_ABORT,
- };
- uint32_t res;
-
- res = tee_pager_handle_abort(abort_type_to_flags[abort_type],
- regs->lr, 0, regs);
- if (res == TEE_PAGER_USER_TA_PANIC) {
+ struct tee_pager_abort_info ai;
+
+ switch (abort_type) {
+ case THREAD_ABORT_DATA:
+ ai.fsr = read_dfsr();
+ ai.va = read_dfar();
+ break;
+ case THREAD_ABORT_PREFETCH:
+ ai.fsr = read_ifsr();
+ ai.va = read_ifar();
+ break;
+ default:
+ ai.fsr = 0;
+ ai.va = regs->lr;
+ break;
+ }
+ ai.abort_type = abort_type;
+ ai.pc = regs->lr;
+ ai.regs = regs;
+
+ switch (tee_pager_get_fault_type(&ai)) {
+ case TEE_PAGER_FAULT_TYPE_IGNORE:
+ break;
+ case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
/*
* It was a user exception, stop user execution and return
* to TEE Core.
@@ -424,55 +530,70 @@ void tee_pager_abort_handler(uint32_t abort_type,
regs->r2 = 0xdeadbeef;
regs->lr = (uint32_t)tee_svc_unwind_enter_user_mode;
regs->spsr = read_cpsr();
- regs->spsr &= ~TEE_PAGER_SPSR_MODE_MASK;
- regs->spsr |= TEE_PAGER_SPSR_MODE_SVC;
+ regs->spsr &= ~CPSR_MODE_MASK;
+ regs->spsr |= CPSR_MODE_SVC;
+ regs->spsr &= ~CPSR_FIA;
+ regs->spsr |= read_spsr() & CPSR_FIA;
/* Select Thumb or ARM mode */
if (regs->lr & 1)
regs->spsr |= CPSR_T;
else
regs->spsr &= ~CPSR_T;
+ break;
+ case TEE_PAGER_FAULT_TYPE_PAGABLE:
+ default:
+ tee_pager_handle_fault(&ai);
+ break;
}
}
-void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages)
+#ifdef CFG_WITH_PAGER
+void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages, bool unmap)
{
size_t n;
+ DMSG("0x%x - 0x%x : %d",
+ vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
+
/* setup memory */
for (n = 0; n < npages; n++) {
- struct tee_pager_pmem *apage;
+ struct tee_pager_pmem *pmem;
tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
- uint32_t *mmu_entry = tee_pager_get_mmu_entry(va);
+ uint32_t *mmu_entry = tee_pager_va_to_xe(va);
/* Ignore unmapped entries */
if (*mmu_entry == 0)
continue;
- apage = malloc(sizeof(struct tee_pager_pmem));
- if (apage == NULL) {
+ pmem = malloc(sizeof(struct tee_pager_pmem));
+ if (pmem == NULL) {
DMSG("Can't allocate memory");
- while (1)
- ;
+ panic();
}
- apage->mmu_entry = (uint32_t *)mmu_entry;
+ pmem->mmu_entry = (uint32_t *)mmu_entry;
+ pmem->area = NULL;
- /*
- * Set to TEE_PAGER_NO_ACCESS_ATTRIBUTES and not
- * TEE_PAGER_PAGE_UNLOADED since pager would misstake it for a
- * hidden page in case the virtual address was reused before
- * the physical page was used for another virtual page.
- */
- *mmu_entry = (*mmu_entry & ~SMALL_PAGE_MASK) |
- TEE_PAGER_NO_ACCESS_ATTRIBUTES;
- apage->ctx_handle = NULL;
+ if (unmap) {
+ /*
+ * Set to TEE_PAGER_NO_ACCESS_ATTRIBUTES and not
+ * TEE_PAGER_PAGE_UNLOADED since pager would
+ * mistake it for a hidden page in case the
+ * virtual address was reused before the physical
+ * page was used for another virtual page.
+ */
+ *mmu_entry = (*mmu_entry & ~SMALL_PAGE_MASK) |
+ TEE_PAGER_NO_ACCESS_ATTRIBUTES;
- TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+ }
tee_pager_npages++;
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
}
- /* Invalidate secure TLB */
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ if (unmap) {
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
}
void tee_pager_unmap(uint32_t page, uint8_t psize)
@@ -481,16 +602,15 @@ void tee_pager_unmap(uint32_t page, uint8_t psize)
if ((page & 0xFFF) != 0) {
EMSG("Invalid page address");
- while (1)
- ;
+ panic();
}
for (i = 0; i < psize; i++) {
uint32_t addr = page + (i << SMALL_PAGE_SHIFT);
- uint32_t *mmu_entry = tee_pager_get_mmu_entry(addr);
+ uint32_t *mmu_entry = tee_pager_va_to_xe(addr);
if (*mmu_entry != 0) {
- struct tee_pager_pmem *apage;
+ struct tee_pager_pmem *pmem;
/* Invalidate mmu_entry */
*mmu_entry &= ~SMALL_PAGE_MASK;
@@ -499,17 +619,16 @@ void tee_pager_unmap(uint32_t page, uint8_t psize)
* Unregister the session from the page entry using
* this mmu_entry.
*/
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if (apage->mmu_entry == (uint32_t *)mmu_entry) {
- apage->ctx_handle = NULL;
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if (pmem->mmu_entry == (uint32_t *)mmu_entry) {
+ pmem->area = NULL;
break;
}
}
- if (apage == NULL) {
+ if (pmem == NULL) {
EMSG("Physical page to unmap not found");
- while (1)
- ;
+ panic();
}
}
}
@@ -520,14 +639,15 @@ void tee_pager_unmap(uint32_t page, uint8_t psize)
void tee_pager_unhide_all_pages(void)
{
- struct tee_pager_pmem *apage;
+ struct tee_pager_pmem *pmem;
bool has_hidden_page = false;
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if ((*apage->mmu_entry & 0xfff) == TEE_PAGER_PAGE_UNLOADED) {
+ TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ if ((*pmem->mmu_entry & SMALL_PAGE_MASK) ==
+ TEE_PAGER_PAGE_UNLOADED) {
/* Page is hidden, unhide it */
has_hidden_page = true;
- *apage->mmu_entry |= 0x10;
+ *pmem->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
}
}
@@ -535,3 +655,4 @@ void tee_pager_unhide_all_pages(void)
if (has_hidden_page)
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
+#endif /*CFG_WITH_PAGER*/
diff --git a/core/arch/arm32/mm/tee_pager_unpg.c b/core/arch/arm32/mm/tee_pager_unpg.c
deleted file mode 100644
index 03756e0..0000000
--- a/core/arch/arm32/mm/tee_pager_unpg.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Copyright (c) 2014, STMicroelectronics International N.V.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include <arm32.h>
-#include <stdlib.h>
-#include <inttypes.h>
-#include <kernel/tee_common_unpg.h>
-#include <trace.h>
-#include <kernel/tee_ta_manager.h>
-#include <kernel/tee_kta_trace.h>
-#include <kernel/trace_ta.h>
-#include <kernel/misc.h>
-#include <mm/tee_pager_unpg.h>
-#include <mm/tee_mmu_defs.h>
-#include <mm/tee_mm_unpg.h>
-#include <mm/tee_mmu_unpg.h>
-#include <mm/core_mmu.h>
-#include <tee/tee_svc.h>
-#include <tee/arch_svc.h>
-#include <arm32.h>
-
-/* Dummies to allow the macros to be left at current places below */
-#define TEE_PAGER_RECORD_FAULT(x) do { } while (0)
-#define TEE_PAGER_SET_OLD_VA(x) do { } while (0)
-#define TEE_PAGER_SET_PA(x) do { } while (0)
-#define TEE_PAGER_SET_COPY(x) do { } while (0)
-#define TEE_PAGER_SET_UNHIDE(x) do { } while (0)
-#define TEE_PAGER_DUMP_RECORDING() do { } while (0)
-#define TEE_PRINT_SAVED_REGS() do { } while (0)
-
-/* The list of physical pages. The first page in the list is the oldest */
-struct tee_pager_pmem_head tee_pager_pmem_head =
-TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
-
-/* number of pages hidden */
-#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
-
-/* number of pages */
-uint8_t tee_pager_npages;
-
-static bool tee_pager_is_monitor_exception(void)
-{
- return (read_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
- TEE_PAGER_SPSR_MODE_MON;
-}
-
-bool tee_pager_is_user_exception(void)
-{
- return (read_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
- TEE_PAGER_SPSR_MODE_USR;
-}
-
-bool tee_pager_is_abort_in_abort_handler(void)
-{
- return (read_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
- TEE_PAGER_SPSR_MODE_ABT;
-}
-
-static void tee_pager_print_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused, const uint32_t pc __unused,
- const uint32_t flags __unused, const uint32_t dbgpcsr __unused)
-{
- DMSG("%s at 0x%x: FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr, fsr, pc, read_ttbr0(), read_contextidr());
- DMSG("CPUID %dd DBGPCSR 0x%x SPSR_abt 0x%x",
- read_mpidr(), dbgpcsr, read_spsr());
-}
-
-static void tee_pager_print_error_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused, const uint32_t pc __unused,
- const uint32_t flags __unused, const uint32_t dbgpcsr __unused)
-{
- EMSG("%s at 0x%x\n"
- "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
- "CPUID 0x%x DBGPCSR 0x%x CPSR 0x%x (read from SPSR)",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr, fsr, pc, read_ttbr0(), read_contextidr(),
- read_mpidr(), dbgpcsr, read_spsr());
-}
-
-static void tee_pager_print_user_abort(const uint32_t addr __unused,
- const uint32_t fsr __unused,
- const uint32_t pc __unused,
- const uint32_t flags __unused,
- const uint32_t dbgpcsr __unused,
- struct thread_abort_regs *regs __unused)
-{
- EMSG_RAW("\nUser TA %s at address 0x%x\n",
- (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
- (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
- addr);
- EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
- fsr, read_ttbr0(), read_ttbr1(), read_contextidr());
- EMSG_RAW(" cpu #%d cpsr 0x%08x (0x%08x)\n",
- get_core_pos(), read_spsr(), dbgpcsr);
- EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
- regs->r0, regs->r4, regs->r8, regs->ip);
- EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
- regs->r1, regs->r5, regs->r9, read_usr_sp());
- EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
- regs->r2, regs->r6, regs->r10, read_usr_lr());
- EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
- regs->r3, regs->r7, regs->r11, pc);
-
- tee_ta_dump_current();
-}
-
-static uint32_t tee_pager_handle_abort(const uint32_t flags, const uint32_t pc,
- const uint32_t dbgpcsr,
- struct thread_abort_regs *regs)
-{
- struct tee_pager_pmem *apage;
- uint32_t addr;
- uint32_t w_addr;
- uint32_t i;
- uint32_t fsr;
-
- if (flags == TEE_PAGER_DATA_ABORT) {
- fsr = read_dfsr();
- addr = read_dfar();
- } else {
- if (flags == TEE_PAGER_PREF_ABORT) {
- fsr = read_ifsr();
- addr = read_ifar();
- } else {
- fsr = 0;
- addr = pc;
- }
- }
-
- w_addr = addr;
-
- /*
- * w_addr is the address that we intend to handle to the page fault
- * for. This is normally the same as addr except in the case where we
- * have thumb instruction spread over two pages and the first page
- * already is available. In that case will addr still be the beginning
- * of the instruction even if the fault really is for the second page.
- */
-
- /* In case of multithreaded version, this section must be protected */
-
- if (tee_pager_is_user_exception()) {
- tee_pager_print_user_abort(addr, fsr, pc, flags, dbgpcsr, regs);
- DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
- return TEE_PAGER_USER_TA_PANIC;
- }
-
- if (tee_pager_is_monitor_exception())
- EMSG("[TEE_PAGER] abort in monitor!");
-
- if (tee_pager_is_abort_in_abort_handler()) {
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("[TEE_PAGER] abort in abort handler (trap CPU)");
- while (1)
- ;
- }
-
- if (flags == TEE_PAGER_UNDEF_ABORT) {
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("[TEE_PAGER] undefined abort (trap CPU)");
- while (1)
- ;
- }
-
- switch (fsr & TEE_FSR_FS_MASK) {
- case TEE_FSR_FS_ALIGNMENT_FAULT: /* Only possible for data abort */
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("[TEE_PAGER] alignement fault! (trap CPU)");
- while (1)
- ;
-
- case TEE_FSR_FS_DEBUG_EVENT:
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
- DMSG("[TEE_PAGER] Ignoring debug event!");
- return TEE_PAGER_NORMAL_RETURN;
-
- case TEE_FSR_FS_ASYNC_EXTERNAL_ABORT: /* Only possible for data abort */
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
- DMSG("[TEE_PAGER] Ignoring async external abort!");
- return TEE_PAGER_NORMAL_RETURN;
-
- default:
-#ifdef PAGER_DEBUG_PRINT
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
-#endif
- break;
- }
-
-#ifndef CFG_TEE_PAGER
- /*
- * Until PAGER is supported, trap CPU here.
- */
- tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
- EMSG("Unexpected page fault! Trap CPU");
- while (1)
- ;
-#endif
-
- TEE_PAGER_RECORD_FAULT(addr);
-
- /* check if the access is valid */
- if (!tee_mm_validate(&tee_mm_vcore, w_addr)) {
- tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
- DMSG("Invalid addr 0x%" PRIx32, addr);
- TEE_PRINT_SAVED_REGS();
- TEE_PAGER_DUMP_RECORDING();
- while (1)
- ;
- }
-
- /* check if page is hidden */
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if (((*apage->mmu_entry & 0xFFF) == TEE_PAGER_PAGE_UNLOADED) &&
- apage->ctx_handle != NULL &&
- w_addr >= TEE_PAGER_GET_VA(apage->mmu_entry) &&
- w_addr <
- TEE_PAGER_GET_VA(apage->mmu_entry) + SMALL_PAGE_SIZE) {
- /* page is hidden, show and move to back */
- *(apage->mmu_entry) |= TEE_MMU_L2SP_PRIV_ACC;
- TEE_PAGER_SET_UNHIDE(1);
- TEE_PAGER_SET_PA((*(apage->mmu_entry)) & 0xFFFFF000);
-
- TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
- TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
-
- w_addr = 0;
-
- break;
- }
- }
-
- if (apage == NULL) {
- /* the page wasn't hidden */
- uint32_t pa;
- uint32_t *mmu_entry =
- (uint32_t *)tee_pager_get_mmu_entry((tee_vaddr_t) w_addr);
-
- if (*mmu_entry != 0) {
- /*
- * There's an pmem entry using this mmu entry, let's use
- * that entry in the new mapping.
- */
- TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
- if (apage->mmu_entry == mmu_entry)
- break;
- }
- if (apage == NULL) {
- tee_pager_print_abort(addr, fsr, pc, flags,
- dbgpcsr);
- DMSG("Couldn't find pmem for mmu_entry %p",
- (void *)mmu_entry);
- while (1)
- ;
- }
- } else {
- apage = TAILQ_FIRST(&tee_pager_pmem_head);
- if (apage == NULL) {
- tee_pager_print_abort(addr, fsr, pc, flags,
- dbgpcsr);
- DMSG("No pmem entries");
- while (1)
- ;
- }
- }
-
- TEE_PAGER_SET_OLD_VA(TEE_PAGER_GET_VA(apage->mmu_entry));
-
- /* save rw data if needed */
- if ((*apage->mmu_entry & 0xFFF) != 0 &&
- tee_ta_check_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
- apage->ctx_handle)) {
- /* make sure the page is accessible */
- if (((*apage->mmu_entry & 0xFFF) ==
- TEE_PAGER_PAGE_UNLOADED)) {
- *apage->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
-
- /* Invalidate secure TLB */
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
- }
-
- tee_ta_save_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
- apage->ctx_handle);
- }
-
- /* move page to back */
- TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
- TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
-
- /* add page to mmu table, small pages [31:12]PA */
- pa = *apage->mmu_entry & 0xFFFFF000;
- TEE_PAGER_SET_PA(pa);
-
- *apage->mmu_entry = 0;
- apage->mmu_entry = mmu_entry;
-
- *apage->mmu_entry = pa | TEE_PAGER_PAGE_LOADED;
-
-#ifdef PAGER_DEBUG_PRINT
- DMSG("Mapped %p -> %p", w_addr & 0xFFFFF000, pa);
-#endif
- }
-
- /* Hide */
- {
- struct tee_pager_pmem *bpage;
-
- i = 0;
- TAILQ_FOREACH(bpage, &tee_pager_pmem_head, link) {
- if (i >= TEE_PAGER_NHIDE)
- break;
- i++;
- *bpage->mmu_entry =
- TEE_MMU_L2SP_CLEAR_ACC(*bpage->mmu_entry);
- }
- }
-
- /* Invalidate secure TLB */
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
-
- if (w_addr) {
- /* load page code & data */
- apage->ctx_handle = tee_ta_load_page(w_addr);
- TEE_PAGER_SET_COPY(1);
-
- cache_maintenance_l1(DCACHE_AREA_CLEAN,
- (void *)(w_addr & 0xFFFFF000), SMALL_PAGE_SIZE);
-
- cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
- (void *)(w_addr & 0xFFFFF000), SMALL_PAGE_SIZE);
- }
-
- /* end protect (multithreded version) */
-
- /*
- * Until now we've been running with IRQ blocked. Let's enble IRQ now
- * when it should be safe to do further processing with them enabled.
- *
- * It should be possible to enable IRQ earlier, but MMU updates and
- * cache mainentance may need some tweaking to guarentee coherency in
- * case we switch CPU in the middle of an operation.
- */
- tee_pager_restore_irq();
-
- return TEE_PAGER_NORMAL_RETURN;
-}
-
-void tee_pager_abort_handler(uint32_t abort_type,
- struct thread_abort_regs *regs)
-{
- static const uint32_t abort_type_to_flags[] = {
- TEE_PAGER_UNDEF_ABORT,
- TEE_PAGER_PREF_ABORT,
- TEE_PAGER_DATA_ABORT,
- };
- uint32_t res;
-
- res = tee_pager_handle_abort(abort_type_to_flags[abort_type],
- regs->lr, 0, regs);
- if (res == TEE_PAGER_USER_TA_PANIC) {
- /*
- * It was a user exception, stop user execution and return
- * to TEE Core.
- */
- regs->r0 = TEE_ERROR_TARGET_DEAD;
- regs->r1 = true;
- regs->r2 = 0xdeadbeef;
- regs->lr = (uint32_t)tee_svc_unwind_enter_user_mode;
- regs->spsr = read_cpsr();
- regs->spsr &= ~TEE_PAGER_SPSR_MODE_MASK;
- regs->spsr |= TEE_PAGER_SPSR_MODE_SVC;
- /* Select Thumb or ARM mode */
- if (regs->lr & 1)
- regs->spsr |= CPSR_T;
- else
- regs->spsr &= ~CPSR_T;
- }
-}
-
-void tee_pager_restore_irq(void)
-{
- /*
- * Restores the settings of IRQ as saved when entering secure
- * world, using something like
- * INTERRUPT_ENABLE(SEC_ENV_SETTINGS_READ() & SEC_ROM_IRQ_ENABLE_MASK);
- */
-
- /* Infinite loop as this is not implemented yet */
- volatile bool mytrue = true;
- EMSG("tee_pager_restore_irq not implemented yet");
- while (mytrue)
- ;
-}
diff --git a/core/arch/arm32/plat-stm/platform_config.h b/core/arch/arm32/plat-stm/platform_config.h
index e2ed548..f4fc935 100644
--- a/core/arch/arm32/plat-stm/platform_config.h
+++ b/core/arch/arm32/plat-stm/platform_config.h
@@ -28,11 +28,17 @@
#ifndef PLATFORM_CONFIG_H
#define PLATFORM_CONFIG_H
+#include <generated/conf.h>
+
#define PLATFORM_FLAVOR_ID_orly2 0
#define PLATFORM_FLAVOR_ID_cannes 1
#define PLATFORM_FLAVOR_IS(flav) \
(PLATFORM_FLAVOR == PLATFORM_FLAVOR_ID_ ## flav)
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform STM"
+#endif
+
#if PLATFORM_FLAVOR_IS(cannes)
#define CPU_IOMEM_BASE 0x08760000
diff --git a/core/arch/arm32/plat-vexpress/core_bootcfg.c b/core/arch/arm32/plat-vexpress/core_bootcfg.c
index f6e68c8..ff3de47 100644
--- a/core/arch/arm32/plat-vexpress/core_bootcfg.c
+++ b/core/arch/arm32/plat-vexpress/core_bootcfg.c
@@ -120,9 +120,20 @@ static bool pbuf_is(enum buf_is_attr attr, paddr_t paddr, size_t size)
}
static struct map_area bootcfg_memory[] = {
+#ifdef ROM_BASE
+ {
+ .type = MEM_AREA_IO_SEC,
+ .pa = ROM_BASE, .size = ROM_SIZE,
+ .cached = true, .secure = true, .rw = false, .exec = false,
+ },
+#endif
+
{ /* teecore execution RAM */
.type = MEM_AREA_TEE_RAM,
.pa = CFG_TEE_RAM_START, .size = CFG_TEE_RAM_PH_SIZE,
+#ifdef CFG_WITH_PAGER
+ .region_size = SMALL_PAGE_SIZE,
+#endif
.cached = true, .secure = true, .rw = true, .exec = true,
},
diff --git a/core/arch/arm32/plat-vexpress/entry.S b/core/arch/arm32/plat-vexpress/entry.S
index b0965e0..cfe60c2 100644
--- a/core/arch/arm32/plat-vexpress/entry.S
+++ b/core/arch/arm32/plat-vexpress/entry.S
@@ -73,6 +73,30 @@ LOCAL_FUNC reset , :
END_FUNC reset
LOCAL_FUNC reset_primary , :
+#ifdef CFG_WITH_PAGER
+ /*
+ * Move init code into correct location
+ *
+ * The binary is built as:
+ * [Pager code, rodata and data] : In correct location
+ * [Init code and rodata] : Should be copied to __text_init_start
+ * [Hashes] : Should be saved before clearing bss
+ *
+ * When we copy init code and rodata into correct location we don't
+ * need to worry about hashes being overwritten as size of .bss,
+ * .heap, .nozi and .heap3 is much larger than the size of init
+ * code and rodata and hashes.
+ */
+ ldr r0, =__text_init_start /* dst */
+ ldr r1, =__data_end /* src */
+ ldr r2, =__rodata_init_end /* dst limit */
+copy_init:
+ ldm r1!, {r6-r12}
+ stm r0!, {r6-r12}
+ cmp r0, r2
+ blt copy_init
+#endif
+
bl get_core_pos
cmp r0, #CFG_TEE_CORE_NB_CORE
/* Unsupported CPU, park it before it breaks something */
@@ -90,7 +114,11 @@ LOCAL_FUNC reset_primary , :
* Before MMU is turned on is VA == PA for cache operations.
*/
ldr r0, =__text_start
+#ifdef CFG_WITH_PAGER
+ ldr r1, =__init_end
+#else
ldr r1, =_end
+#endif
sub r1, r1, #1
bl arm_cl1_d_invbyva
@@ -117,7 +145,11 @@ LOCAL_FUNC reset_primary , :
*/
mov r4, r0
ldr r0, =__text_start
+#ifdef CFG_WITH_PAGER
+ ldr r1, =__init_end
+#else
ldr r1, =_end
+#endif
sub r1, r1, #1
bl arm_cl1_d_cleaninvbyva
mov r0, r4
diff --git a/core/arch/arm32/plat-vexpress/link.mk b/core/arch/arm32/plat-vexpress/link.mk
index e17948c..e76232b 100644
--- a/core/arch/arm32/plat-vexpress/link.mk
+++ b/core/arch/arm32/plat-vexpress/link.mk
@@ -25,7 +25,6 @@ link-script-cppflags := -DASM=1 \
$(cppflags$(sm)))
entries-unpaged += tee_pager_abort_handler
-entries-unpaged += pager_load_rodata
entries-unpaged += thread_init_vbar
entries-unpaged += sm_init
entries-unpaged += core_init_mmu_regs
diff --git a/core/arch/arm32/plat-vexpress/main.c b/core/arch/arm32/plat-vexpress/main.c
index 9f796df..b9232cd 100644
--- a/core/arch/arm32/plat-vexpress/main.c
+++ b/core/arch/arm32/plat-vexpress/main.c
@@ -87,7 +87,9 @@
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
+#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, NUM_THREADS, STACK_THREAD_SIZE);
+#endif
const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
GET_STACK(stack_tmp[0]),
@@ -130,12 +132,21 @@ static uint32_t main_mmu_ul1_ttb[NUM_THREADS][TEE_MMU_UL1_NUM_ENTRIES]
__attribute__((section(".nozi.mmu.ul1"),
aligned(TEE_MMU_UL1_ALIGNMENT)));
+extern uint8_t __text_init_start[];
+extern uint8_t __data_start[];
+extern uint8_t __data_end[];
extern uint8_t __bss_start[];
extern uint8_t __bss_end[];
+extern uint8_t __init_start[];
+extern uint8_t __init_size[];
extern uint8_t __heap1_start[];
extern uint8_t __heap1_end[];
extern uint8_t __heap2_start[];
extern uint8_t __heap2_end[];
+extern uint8_t __pagable_part_start[];
+extern uint8_t __pagable_part_end[];
+extern uint8_t __pagable_start[];
+extern uint8_t __pagable_end[];
static void main_fiq(void);
#if defined(WITH_ARM_TRUSTED_FW)
@@ -171,7 +182,9 @@ static void init_canaries(void)
INIT_CANARY(stack_tmp);
INIT_CANARY(stack_abt);
INIT_CANARY(stack_sm);
+#ifndef CFG_WITH_PAGER
INIT_CANARY(stack_thread);
+#endif
}
void check_canaries(void)
@@ -188,7 +201,9 @@ void check_canaries(void)
ASSERT_STACK_CANARIES(stack_tmp);
ASSERT_STACK_CANARIES(stack_abt);
ASSERT_STACK_CANARIES(stack_sm);
+#ifndef CFG_WITH_PAGER
ASSERT_STACK_CANARIES(stack_thread);
+#endif
#endif /*WITH_STACK_CANARIES*/
}
@@ -272,6 +287,130 @@ static void main_init_gic(void)
}
#endif
+#ifdef CFG_WITH_PAGER
+static void main_init_runtime(uint32_t pagable_part)
+{
+ size_t n;
+ size_t init_size = (size_t)__init_size;
+ size_t pagable_size = __pagable_end - __pagable_start;
+ size_t hash_size = (pagable_size / SMALL_PAGE_SIZE) *
+ TEE_SHA256_HASH_SIZE;
+ tee_mm_entry_t *mm;
+ uint8_t *paged_store;
+ uint8_t *hashes;
+ uint8_t *tmp_hashes = __init_start + init_size;
+
+
+ TEE_ASSERT(pagable_size % SMALL_PAGE_SIZE == 0);
+
+
+ /* Copy it right after the init area. */
+ memcpy(tmp_hashes, __data_end + init_size, hash_size);
+
+ /*
+ * Zero BSS area. Note that globals that would normally go
+ * into BSS which are used before this has to be put into .nozi.*
+ * to avoid getting overwritten.
+ */
+ memset(__bss_start, 0, __bss_end - __bss_start);
+
+ malloc_init(__heap1_start, __heap1_end - __heap1_start);
+ malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
+
+ hashes = malloc(hash_size);
+ EMSG("hash_size %d", hash_size);
+ TEE_ASSERT(hashes);
+ memcpy(hashes, tmp_hashes, hash_size);
+
+ /*
+ * Need tee_mm_sec_ddr initialized to be able to allocate secure
+ * DDR below.
+ */
+ teecore_init_ta_ram();
+
+ mm = tee_mm_alloc(&tee_mm_sec_ddr, pagable_size);
+ TEE_ASSERT(mm);
+ paged_store = (uint8_t *)tee_mm_get_smem(mm);
+ /* Copy init part into pagable area */
+ memcpy(paged_store, __init_start, init_size);
+ /* Copy pagable part after init part into pagable area */
+ memcpy(paged_store + init_size, (void *)pagable_part,
+ __pagable_part_end - __pagable_part_start);
+
+ /* Check that hashes of what's in pagable area is OK */
+ DMSG("Checking hashes of pagable area");
+ for (n = 0; (n * SMALL_PAGE_SIZE) < pagable_size; n++) {
+ const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
+ const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
+ TEE_Result res;
+
+ DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
+ res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
+ if (res != TEE_SUCCESS)
+ EMSG("Hash failed for page %zu at %p: res 0x%x",
+ n, page, res);
+ }
+
+ /*
+ * Copy what's not initialized in the last init page. Needed
+ * because we're not going to fault in the init pages again. We can't
+ * fault in pages until we've switched to the new vector by calling
+ * thread_init_handlers() below.
+ */
+ if (init_size % SMALL_PAGE_SIZE) {
+ uint8_t *p;
+
+ memcpy(__init_start + init_size, paged_store + init_size,
+ SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));
+
+ p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
+ ~SMALL_PAGE_MASK);
+
+ cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
+ cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
+ SMALL_PAGE_SIZE);
+ }
+
+ /*
+ * Initialize the virtual memory pool used for main_mmu_l2_ttb which
+ * is supplied to tee_pager_init() below.
+ */
+ if (!tee_mm_init(&tee_mm_vcore,
+ ROUNDDOWN(CFG_TEE_RAM_START, SECTION_SIZE),
+ ROUNDDOWN(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
+ SECTION_SIZE),
+ SMALL_PAGE_SHIFT, 0))
+ panic();
+
+ tee_pager_init(main_mmu_l2_ttb);
+
+ /*
+ * Claim virtual memory which isn't paged, note that there might be
+ * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
+ * claimed to avoid later allocations to get that memory.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
+ (vaddr_t)(__text_init_start - tee_mm_vcore.lo));
+ TEE_ASSERT(mm);
+
+ /*
+ * Allocate virtual memory for the pagable area and let the pager
+ * take charge of all the pages already assigned to that memory.
+ */
+ mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pagable_start,
+ pagable_size);
+ TEE_ASSERT(mm);
+ tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
+ paged_store, hashes);
+ tee_pager_add_pages((vaddr_t)__pagable_start,
+ ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
+ tee_pager_add_pages((vaddr_t)__pagable_start +
+ ROUNDUP(init_size, SMALL_PAGE_SIZE),
+ (pagable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
+ SMALL_PAGE_SIZE, true);
+
+}
+#else
static void main_init_runtime(uint32_t pagable_part __unused)
{
/*
@@ -283,9 +422,47 @@ static void main_init_runtime(uint32_t pagable_part __unused)
malloc_init(__heap1_start, __heap1_end - __heap1_start);
+ /*
+ * Initialized at this stage in the pager version of this function
+ * above
+ */
teecore_init_ta_ram();
}
+#endif
+#ifdef CFG_WITH_PAGER
+static void main_init_thread_stacks(void)
+{
+ size_t n;
+
+ /*
+ * Allocate virtual memory for thread stacks.
+ */
+ for (n = 0; n < NUM_THREADS; n++) {
+ tee_mm_entry_t *mm;
+ vaddr_t sp;
+
+ /* Get unmapped page at bottom of stack */
+ mm = tee_mm_alloc(&tee_mm_vcore, SMALL_PAGE_SIZE);
+ TEE_ASSERT(mm);
+ /* Claim eventual physical page */
+ tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
+ true);
+
+ /* Allocate the actual stack */
+ mm = tee_mm_alloc(&tee_mm_vcore, STACK_THREAD_SIZE);
+ TEE_ASSERT(mm);
+ sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
+ if (!thread_init_stack(n, sp))
+ panic();
+ /* Claim eventual physical page */
+ tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
+ true);
+ /* Add the area to the pager */
+ tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
+ }
+}
+#else
static void main_init_thread_stacks(void)
{
size_t n;
@@ -296,6 +473,7 @@ static void main_init_thread_stacks(void)
panic();
}
}
+#endif
static void main_init_primary_helper(uint32_t pagable_part, uint32_t nsec_entry)
{
@@ -529,3 +707,4 @@ void *core_mmu_alloc_l2(struct map_area *map)
l2_offs += ROUNDUP(map->size, SECTION_SIZE) / SECTION_SIZE;
return main_mmu_l2_ttb;
}
+
diff --git a/core/arch/arm32/plat-vexpress/platform_config.h b/core/arch/arm32/plat-vexpress/platform_config.h
index bdab66d..ad6ac15 100644
--- a/core/arch/arm32/plat-vexpress/platform_config.h
+++ b/core/arch/arm32/plat-vexpress/platform_config.h
@@ -113,10 +113,23 @@
#define DRAM0_BASE 0x80000000
#define DRAM0_SIZE 0x80000000
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE (0x06000000)
+#define TZSRAM_SIZE (200 * 1024)
+
+#define TZDRAM_BASE (TZSRAM_BASE + CFG_TEE_RAM_VA_SIZE)
+#define TZDRAM_SIZE (0x02000000 - CFG_TEE_RAM_VA_SIZE)
+
+#else /*CFG_WITH_PAGER*/
+
/* Location of trusted dram on the base fvp */
#define TZDRAM_BASE 0x06000000
#define TZDRAM_SIZE 0x02000000
+#endif /*CFG_WITH_PAGER*/
+
#define CFG_TEE_CORE_NB_CORE 8
#define CFG_SHMEM_START (DRAM0_BASE + 0x1000000)
@@ -133,6 +146,16 @@
#define DRAM0_BASE 0x80000000
#define DRAM0_SIZE 0x7F000000
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE 0xFF000000
+#define TZSRAM_SIZE (200 * 1024)
+
+#define TZDRAM_BASE (TZSRAM_BASE + CFG_TEE_RAM_VA_SIZE)
+#define TZDRAM_SIZE (0x00E00000 - CFG_TEE_RAM_VA_SIZE)
+
+#else /*CFG_WITH_PAGER*/
/*
* Last part of DRAM is reserved as secure dram, note that the last 2MiB
* of DRAM0 is used by SCP dor DDR retraining.
@@ -145,6 +168,7 @@
* OP-TEE OS is mapped using small pages instead.
*/
#define TZDRAM_SIZE 0x00E00000
+#endif /*CFG_WITH_PAGER*/
#define CFG_TEE_CORE_NB_CORE 6
@@ -158,6 +182,9 @@
/*
* QEMU specifics.
*/
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform vexpress-qemu"
+#endif
#define DRAM0_BASE 0x80000000
#define DRAM0_SIZE 0x40000000
@@ -192,10 +219,22 @@
#define DRAM0_TEERES_BASE (DRAM0_BASE + DRAM0_SIZE)
#define DRAM0_TEERES_SIZE (33 * 1024 * 1024)
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+#define TZSRAM_BASE DRAM0_TEERES_BASE
+#define TZSRAM_SIZE (200 * 1024)
+
+#define TZDRAM_BASE (DRAM0_TEERES_BASE + CFG_TEE_RAM_VA_SIZE)
+#define TZDRAM_SIZE (32 * 1024 * 1024 - CFG_TEE_RAM_VA_SIZE)
+
+#else /* CFG_WITH_PAGER */
#define TZDRAM_BASE DRAM0_TEERES_BASE
#define TZDRAM_SIZE (32 * 1024 * 1024)
+#endif /* CFG_WITH_PAGER */
+
#define CFG_TEE_CORE_NB_CORE 2
#define CFG_SHMEM_START (DRAM0_TEERES_BASE + \
@@ -216,7 +255,25 @@
#define CFG_TEE_LOAD_ADDR CFG_TEE_RAM_START
#endif
+#ifdef CFG_WITH_PAGER
+/*
+ * Have TZSRAM either as real physical or emulated by reserving an area
+ * somewhere else.
+ *
+ * +------------------+
+ * | TZSRAM | TEE_RAM |
+ * +--------+---------+
+ * | TZDRAM | TA_RAM |
+ * +--------+---------+
+ */
+#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
+#define CFG_TEE_RAM_START TZSRAM_BASE
+#define CFG_TA_RAM_START TZDRAM_BASE
+#define CFG_TA_RAM_SIZE TZDRAM_SIZE
+#else
/*
+ * Assumes that either TZSRAM isn't large enough or TZSRAM doesn't exist,
+ * everything is in TZDRAM.
* +------------------+
* | | TEE_RAM |
* + TZDRAM +---------+
@@ -227,6 +284,7 @@
#define CFG_TEE_RAM_START TZDRAM_BASE
#define CFG_TA_RAM_START (TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE)
#define CFG_TA_RAM_SIZE (TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE)
+#endif
#ifndef UART_BAUDRATE
#define UART_BAUDRATE 115200