author    Jens Wiklander <jens.wiklander@linaro.org>   2021-01-04 08:34:49 +0100
committer Jérôme Forissier <jerome@forissier.org>      2021-01-07 15:49:29 +0100
commit    507229d540f7f048c4782209e0942553ed78d83a (patch)
tree      1aa9a57100748b0a38538c17451c445153ee40ae
parent    450c1b15bdeeba92ce10f7833163219fef78f586 (diff)
core: add core_mmu_remove_mapping()
Adds core_mmu_remove_mapping() which removes mappings earlier added with
core_mmu_add_mapping().

Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r--  core/arch/arm/include/mm/core_mmu.h |  2
-rw-r--r--  core/arch/arm/mm/core_mmu.c         | 74
2 files changed, 76 insertions(+), 0 deletions(-)
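
For context (not part of this commit), a minimal sketch of how the new API pairs with core_mmu_add_mapping(): map a physical range dynamically, use it, then tear the mapping down again. The memory type MEM_AREA_IO_SEC, the phys_to_virt() lookup and the surrounding error handling are assumptions for illustration only, and the exact phys_to_virt() signature may differ between OP-TEE versions.

/*
 * Illustrative sketch only, not part of this commit: dynamically map a
 * physical range, use it, then remove the mapping with the new
 * core_mmu_remove_mapping(). MEM_AREA_IO_SEC and the phys_to_virt()
 * lookup are assumptions for the example.
 */
static TEE_Result map_use_unmap(paddr_t pa, size_t len)
{
	void *va = NULL;

	/* core_mmu_add_mapping() returns false if the range cannot be mapped */
	if (!core_mmu_add_mapping(MEM_AREA_IO_SEC, pa, len))
		return TEE_ERROR_GENERIC;

	/* Look up the virtual address the range was mapped at */
	va = phys_to_virt(pa, MEM_AREA_IO_SEC);
	if (!va)
		return TEE_ERROR_GENERIC;

	/* ... access the range through va ... */

	/* Undo the mapping; the whole previously added region must be covered */
	return core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, len);
}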
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
index b55a406c..33ee2ebd 100644
--- a/core/arch/arm/include/mm/core_mmu.h
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -614,6 +614,8 @@ static inline bool core_mmu_is_shm_cached(void)
(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
}
+TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
+ size_t len);
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
/* various invalidate secure TLB */
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index 59e2e681..4f27b249 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -1483,6 +1483,26 @@ void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
idx, pa, attr);
}
+static void clear_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned int end = 0;
+ unsigned int idx = 0;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, 0, 0);
+ idx++;
+ }
+}
+
static void set_region(struct core_mmu_table_info *tbl_info,
struct tee_mmap_region *region)
{
@@ -1859,6 +1879,60 @@ void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
set_pg_region(dir_info, r, &pgt, &pg_info);
}
+TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
+ size_t len)
+{
+ struct core_mmu_table_info tbl_info = { };
+ struct tee_mmap_region *res_map = NULL;
+ struct tee_mmap_region *map = NULL;
+ paddr_t pa = virt_to_phys(addr);
+ size_t granule = 0;
+ ptrdiff_t i = 0;
+ paddr_t p = 0;
+ size_t l = 0;
+
+ map = find_map_by_type_and_pa(type, pa);
+ if (!map)
+ return TEE_ERROR_GENERIC;
+
+ res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
+ if (!res_map)
+ return TEE_ERROR_GENERIC;
+ if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
+ return TEE_ERROR_GENERIC;
+ granule = BIT(tbl_info.shift);
+
+ if (map < static_memory_map ||
+ map >= static_memory_map + ARRAY_SIZE(static_memory_map))
+ return TEE_ERROR_GENERIC;
+ i = map - static_memory_map;
+
+ /* Check that we have a full match */
+ p = ROUNDDOWN(pa, granule);
+ l = ROUNDUP(len + pa - p, granule);
+ if (map->pa != p || map->size != l)
+ return TEE_ERROR_GENERIC;
+
+ clear_region(&tbl_info, map);
+ tlbi_all();
+
+ /* If possible remove the va range from res_map */
+ if (res_map->va - map->size == map->va) {
+ res_map->va -= map->size;
+ res_map->size += map->size;
+ }
+
+ /* Remove the entry. */
+ memmove(map, map + 1,
+ (ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));
+
+ /* Clear the last new entry in case it was used */
+ memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
+ 0, sizeof(*map));
+
+ return TEE_SUCCESS;
+}
+
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
struct core_mmu_table_info tbl_info;