summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJens Wiklander <jens.wiklander@linaro.org>2019-05-23 17:42:14 +0200
committerJérôme Forissier <jerome.forissier@linaro.org>2019-06-28 17:52:36 +0200
commit65137432d3a42d885777bf65d65952e3bae53e80 (patch)
tree8c97d78401a8026d92cddac0fca2fae133e1b4cf
parent23cf8945c70e1df9ef878eb1a6c9e204c04f9ea9 (diff)
ldelf: support dumping memory map
Adds support in ldelf to dump memory maps. Acked-by: Jerome Forissier <jerome.forissier@linaro.org> Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r--ldelf/include/ldelf.h35
-rw-r--r--ldelf/main.c27
-rw-r--r--ldelf/ta_elf.c136
-rw-r--r--ldelf/ta_elf.h4
4 files changed, 195 insertions, 7 deletions
diff --git a/ldelf/include/ldelf.h b/ldelf/include/ldelf.h
index fc425387..20654e88 100644
--- a/ldelf/include/ldelf.h
+++ b/ldelf/include/ldelf.h
@@ -6,7 +6,7 @@
#ifndef __LDELF_H
#define __LDELF_H
-#include <stdint.h>
+#include <types_ext.h>
#include <tee_api_types.h>
/* Size of stack for TEE Core to allocate */
@@ -19,6 +19,7 @@
* @flags: [out] Flags field of TA header
* @entry_func: [out] TA entry function
* @stack_ptr: [out] TA stack pointer
+ * @dump_entry: [out] Dump TA mappings and stack trace
*/
struct ldelf_arg {
TEE_UUID uuid;
@@ -26,6 +27,38 @@ struct ldelf_arg {
uint32_t flags;
uint64_t entry_func;
uint64_t stack_ptr;
+ uint64_t dump_entry;
+};
+
/* Flag bits describing one entry of a dumped memory map */
#define DUMP_MAP_READ	BIT(0)	/* Region is readable */
#define DUMP_MAP_WRITE	BIT(1)	/* Region is writable */
#define DUMP_MAP_EXEC	BIT(2)	/* Region is executable */
#define DUMP_MAP_SECURE	BIT(3)	/* Region is backed by secure memory */
#define DUMP_MAP_EPHEM	BIT(4)	/* Ephemeral mapping (TA invocation params) */
#define DUMP_MAP_LDELF	BIT(7)	/* Region belongs to ldelf itself */

/*
 * struct dump_entry_arg - argument for ldelf_dump()
 *
 * Passed by TEE Core to the TA-state dump entry point exported via
 * ldelf_arg::dump_entry. Carries the aborting context's registers
 * (32- or 64-bit variant selected by @is_arm32) followed by a flexible
 * array of @num_maps memory-map entries.
 */
struct dump_entry_arg {
	union {
		struct {
			/* r0-r15 of the aborting context - TODO confirm */
			uint32_t regs[16];
		} arm32;
		struct {
			uint64_t fp;	/* Frame pointer */
			uint64_t sp;	/* Stack pointer */
			uint64_t pc;	/* Program counter */
		} arm64;
	};
	bool is_arm32;		/* Selects arm32 vs arm64 member above */
	size_t num_maps;	/* Number of entries in maps[] */
	struct dump_map {
		vaddr_t va;	/* Virtual address of region */
		paddr_t pa;	/* Physical address of region */
		size_t sz;	/* Size of region in bytes */
		uint32_t flags;	/* DUMP_MAP_* bits */
	} maps[];		/* Flexible array member, C99 */
};
/*
diff --git a/ldelf/main.c b/ldelf/main.c
index dea2849d..7779bd5a 100644
--- a/ldelf/main.c
+++ b/ldelf/main.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, Linaro Limited
*/
+#include <assert.h>
#include <ldelf.h>
#include <malloc.h>
#include <sys/queue.h>
@@ -13,9 +14,23 @@
#include "ta_elf.h"
#include "sys.h"
+static struct ta_elf_queue elf_queue = TAILQ_HEAD_INITIALIZER(elf_queue);
static size_t mpool_size = 2 * SMALL_PAGE_SIZE;
static vaddr_t mpool_base;
+static void __noreturn __maybe_unused dump_ta_state(struct dump_entry_arg *arg)
+{
+ struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
+
+ assert(elf && elf->is_main);
+ EMSG_RAW("Status of TA %pUl", (void *)&elf->uuid);
+ EMSG_RAW(" arch: %s", elf->is_32bit ? "arm" : "aarch64");
+
+
+ ta_elf_print_mappings(&elf_queue, arg->num_maps, arg->maps, mpool_base);
+ sys_return_cleanup();
+}
+
/*
* ldelf()- Loads ELF into memory
* @arg: Argument passing to/from TEE Core
@@ -57,11 +72,11 @@ void ldelf(struct ldelf_arg *arg)
DMSG("ELF (%pUl) at %#"PRIxVA,
(void *)&elf->uuid, elf->load_addr);
- res = sys_unmap(mpool_base, mpool_size);
- if (res) {
- EMSG("sys_unmap(%p, %zu): result %"PRIx32,
- (void *)mpool_base, mpool_size, res);
- panic();
- }
+#if TRACE_LEVEL >= TRACE_ERROR
+ arg->dump_entry = (vaddr_t)(void *)dump_ta_state;
+#else
+ arg->dump_entry = 0;
+#endif
+
sys_return_cleanup();
}
diff --git a/ldelf/ta_elf.c b/ldelf/ta_elf.c
index 450d6101..460d1436 100644
--- a/ldelf/ta_elf.c
+++ b/ldelf/ta_elf.c
@@ -8,7 +8,9 @@
#include <elf32.h>
#include <elf64.h>
#include <elf_common.h>
+#include <ldelf.h>
#include <pta_system.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string_ext.h>
#include <string.h>
@@ -18,6 +20,8 @@
#include "sys.h"
#include "ta_elf.h"
+static vaddr_t ta_stack;
+
struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
static struct ta_elf *queue_elf(const TEE_UUID *uuid)
@@ -771,6 +775,7 @@ void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit,
*ta_flags = head->flags;
*sp = va + head->stack_size;
+ ta_stack = va;
}
void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
@@ -813,3 +818,134 @@ void ta_elf_finalize_mappings(struct ta_elf *elf)
err(res, "sys_set_prot");
}
}
+
+static void print_seg(size_t idx __maybe_unused, int elf_idx __maybe_unused,
+ vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
+ size_t sz __maybe_unused, uint32_t flags)
+{
+ int width __maybe_unused = 8;
+ char desc[14] __maybe_unused = "";
+ char flags_str[] __maybe_unused = "----";
+
+ if (elf_idx > -1) {
+ snprintf(desc, sizeof(desc), " [%d]", elf_idx);
+ } else {
+ if (flags & DUMP_MAP_EPHEM)
+ snprintf(desc, sizeof(desc), " (param)");
+ if (flags & DUMP_MAP_LDELF)
+ snprintf(desc, sizeof(desc), " (ldelf)");
+ if (va == ta_stack)
+ snprintf(desc, sizeof(desc), " (stack)");
+ }
+
+ if (flags & DUMP_MAP_READ)
+ flags_str[0] = 'r';
+ if (flags & DUMP_MAP_WRITE)
+ flags_str[1] = 'w';
+ if (flags & DUMP_MAP_EXEC)
+ flags_str[2] = 'x';
+ if (flags & DUMP_MAP_SECURE)
+ flags_str[3] = 's';
+
+ EMSG_RAW("region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s",
+ idx, width, va, width, pa, sz, flags_str, desc);
+}
+
/*
 * ta_elf_print_mappings() - print the merged memory map of a TA
 * @elf_queue:	queue of loaded ELFs (main TA first, then dependencies)
 * @num_maps:	number of entries in @maps
 * @maps:	memory-map entries supplied by TEE Core, sorted by va
 *		(assumed ascending — TODO confirm against caller)
 * @mpool_base:	base address of ldelf's own allocation pool, used to
 *		tag that mapping with DUMP_MAP_LDELF
 *
 * Prints every region exactly once, in virtual-address order, followed
 * by a legend mapping [elf_idx] to each ELF's UUID and load address.
 */
void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
			   struct dump_map *maps, vaddr_t mpool_base)
{
	struct segment *seg = NULL;
	struct ta_elf *elf = NULL;
	size_t elf_idx = 0;
	size_t idx = 0;		/* running region index for the output */
	size_t map_idx = 0;

	/*
	 * Loop over all segments and maps, printing virtual address in
	 * order. Segment has priority if the virtual address is present
	 * in both map and segment.
	 */
	elf = TAILQ_FIRST(elf_queue);
	if (elf)
		seg = TAILQ_FIRST(&elf->segs);
	while (true) {
		/* va == -1 (VADDR max) sorts after every real map entry */
		vaddr_t va = -1;
		size_t sz = 0;
		uint32_t flags = DUMP_MAP_SECURE;
		size_t offs = 0;

		if (seg) {
			/*
			 * Segment extent rounded to mapping granularity;
			 * rounddown()/roundup() are presumably
			 * page-granular — TODO confirm.
			 */
			va = rounddown(seg->vaddr + elf->load_addr);
			sz = roundup(seg->vaddr + seg->memsz) -
			     rounddown(seg->vaddr);
		}

		/* Print all map entries below the current segment */
		while (map_idx < num_maps && maps[map_idx].va <= va) {
			uint32_t f = 0;

			/* If there's a match, it should be the same map */
			if (maps[map_idx].va == va) {
				/*
				 * In shared libraries the first page is
				 * mapped separately with the rest of that
				 * segment following back to back in a
				 * separate entry.
				 */
				if (map_idx + 1 < num_maps &&
				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
					vaddr_t next_va = maps[map_idx].va +
							  maps[map_idx].sz;
					size_t comb_sz = maps[map_idx].sz +
							 maps[map_idx + 1].sz;

					if (next_va == maps[map_idx + 1].va &&
					    comb_sz == sz &&
					    maps[map_idx].flags ==
					    maps[map_idx + 1].flags) {
						/* Skip this and next entry */
						map_idx += 2;
						continue;
					}
				}
				/* Same va must describe the same region */
				assert(maps[map_idx].sz == sz);
			} else if (maps[map_idx].va < va) {
				/* Non-segment mapping: tag ldelf's pool */
				if (maps[map_idx].va == mpool_base)
					f |= DUMP_MAP_LDELF;
				print_seg(idx, -1, maps[map_idx].va,
					  maps[map_idx].pa, maps[map_idx].sz,
					  maps[map_idx].flags | f);
				idx++;
			}
			map_idx++;
		}

		/* No segment left: remaining maps were printed above */
		if (!seg)
			break;

		/* File offset shown in the pa column for ELF segments */
		offs = rounddown(seg->offset);
		if (seg->flags & PF_R)
			flags |= DUMP_MAP_READ;
		if (seg->flags & PF_W)
			flags |= DUMP_MAP_WRITE;
		if (seg->flags & PF_X)
			flags |= DUMP_MAP_EXEC;

		print_seg(idx, elf_idx, va, offs, sz, flags);
		idx++;

		/* Advance to next segment, moving to the next ELF at end */
		seg = TAILQ_NEXT(seg, link);
		if (!seg) {
			elf = TAILQ_NEXT(elf, link);
			if (elf)
				seg = TAILQ_FIRST(&elf->segs);
			elf_idx++;
		}
	};

	/* Legend: map each [elf_idx] used above to UUID and load address */
	elf_idx = 0;
	TAILQ_FOREACH(elf, elf_queue, link) {
		EMSG_RAW(" [%zu] %pUl @ 0x%0*" PRIxVA,
			 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
		elf_idx++;
	}
}
diff --git a/ldelf/ta_elf.h b/ldelf/ta_elf.h
index 61822778..bf18200b 100644
--- a/ldelf/ta_elf.h
+++ b/ldelf/ta_elf.h
@@ -6,6 +6,7 @@
#ifndef TA_ELF_H
#define TA_ELF_H
+#include <ldelf.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <types_ext.h>
@@ -72,4 +73,7 @@ void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit);
void ta_elf_relocate(struct ta_elf *elf);
void ta_elf_finalize_mappings(struct ta_elf *elf);
+void ta_elf_print_mappings(struct ta_elf_queue *elf_queue, size_t num_maps,
+ struct dump_map *maps, vaddr_t mpool_base);
+
#endif /*TA_ELF_H*/