Diffstat (limited to 'arch/arm64/include/asm/memory.h')
-rw-r--r-- | arch/arm64/include/asm/memory.h | 70
1 file changed, 62 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d4bae7d6e0d8..8f7f30fec6be 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -80,12 +80,12 @@
 #define KERNEL_END	_end
 
 /*
- * KASAN requires 1/8th of the kernel virtual address space for the shadow
- * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
+ * address space for the shadow region respectively. They can bloat the stack
+ * significantly, so double the (minimum) stack size when they are in use.
  */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
@@ -211,6 +211,36 @@ static inline unsigned long kaslr_offset(void)
 #define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
 
 /*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define __untagged_addr(addr)	\
+	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
+
+#define untagged_addr(addr)	({					\
+	u64 __addr = (__force u64)(addr);				\
+	__addr &= __untagged_addr(__addr);				\
+	(__force __typeof__(addr))__addr;				\
+})
+
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_shifted(tag)	((u64)(tag) << 56)
+#define __tag_reset(addr)	__untagged_addr(addr)
+#define __tag_get(addr)	(__u8)((u64)(addr) >> 56)
+#else
+#define __tag_shifted(tag)	0UL
+#define __tag_reset(addr)	(addr)
+#define __tag_get(addr)	0
+#endif
+
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
+	return (const void *)(__addr | __tag_shifted(tag));
+}
+
+/*
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
  * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -281,6 +311,22 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))
 
 /*
+ * With non-canonical CFI jump tables, the compiler replaces function
+ * address references with the address of the function's CFI jump
+ * table entry. This results in __pa_symbol(function) returning the
+ * physical address of the jump table entry, which can lead to address
+ * space confusion since the jump table points to the function's
+ * virtual address. Therefore, use inline assembly to ensure we are
+ * always taking the address of the actual function.
+ */
+#define __pa_function(x) ({						\
+	unsigned long addr;						\
+	asm("adrp %0, " __stringify(x) "\n\t"				\
+	    "add %0, %0, :lo12:" __stringify(x) : "=r" (addr));		\
+	__pa_symbol(addr);						\
+})
+
+/*
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
  */
@@ -293,7 +339,14 @@
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);			\
+	const void *__addr_tag =					\
+		__tag_set((void *)__addr, page_kasan_tag(page));	\
+	((void *)__addr_tag);						\
+})
+
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
 #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -301,9 +354,10 @@
 #endif
 #endif
 
-#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
-					 _virt_addr_valid(kaddr))
+#define _virt_addr_is_linear(kaddr)	\
+	(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		\
+	(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
 
 #include <asm-generic/memory_model.h>
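The helpers in the second hunk pack a software tag into bits 63:56 and rely on bit 55 to tell user pointers from kernel pointers. Below is a minimal standalone sketch of that arithmetic, not the kernel code itself: sign_extend64() is re-implemented locally, and tag_set()/untagged_addr() mirror the semantics of __tag_set() and untagged_addr(); the example addresses are made up for illustration.

/* tag_demo.c - standalone model of the top-byte tag helpers above. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend64(value, index):
 * sign-extend from bit 'index' upwards. */
static inline uint64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (uint64_t)((int64_t)(value << shift) >> shift);
}

#define TAG_SHIFTED(tag)	((uint64_t)(tag) << 56)
#define TAG_GET(addr)		((uint8_t)((addr) >> 56))

/* Mirrors untagged_addr(): bit 55 separates TTBR0 (user) from TTBR1
 * (kernel) addresses, so ANDing with the sign extension of bit 55
 * clears the tag byte of user pointers while leaving kernel pointers
 * (top byte already 0xff) intact. */
static inline uint64_t untagged_addr(uint64_t addr)
{
	return addr & sign_extend64(addr, 55);
}

/* Mirrors __tag_set(): replace the top byte with 'tag'. */
static inline uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~TAG_SHIFTED(0xff)) | TAG_SHIFTED(tag);
}

int main(void)
{
	uint64_t user = tag_set(0x0000aabbccdd0000ULL, 0x2a);
	uint64_t kern = 0xffff000012345678ULL;

	assert(TAG_GET(user) == 0x2a);
	assert(untagged_addr(user) == 0x0000aabbccdd0000ULL);
	assert(untagged_addr(kern) == kern);	/* kernel VA is unchanged */

	printf("tagged %#" PRIx64 " -> untagged %#" PRIx64 " (tag %#x)\n",
	       user, untagged_addr(user), (unsigned)TAG_GET(user));
	return 0;
}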
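The KASAN_SHADOW_SIZE change works because KASAN_SHADOW_SCALE_SHIFT is 3 for generic KASAN (one shadow byte per 8 bytes of memory) and 4 for software tag-based KASAN (one shadow byte per 16-byte granule). A quick sketch of the resulting shadow sizes, assuming VA_BITS is 48:

/* shadow_size.c - KASAN shadow-size arithmetic for VA_BITS = 48. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VA_BITS			48
#define SCALE_SHIFT_GENERIC	3	/* 1 shadow byte per 8 bytes */
#define SCALE_SHIFT_SW_TAGS	4	/* 1 shadow byte per 16 bytes */

int main(void)
{
	uint64_t generic = UINT64_C(1) << (VA_BITS - SCALE_SHIFT_GENERIC);
	uint64_t sw_tags = UINT64_C(1) << (VA_BITS - SCALE_SHIFT_SW_TAGS);

	/* 2^45 bytes = 32 TiB, 2^44 bytes = 16 TiB */
	printf("generic KASAN shadow: %" PRIu64 " TiB (1/8 of the VA space)\n",
	       generic >> 40);
	printf("sw-tags KASAN shadow: %" PRIu64 " TiB (1/16 of the VA space)\n",
	       sw_tags >> 40);
	return 0;
}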
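__pa_function() depends on adrp/add materialising the symbol's real link-time address instead of the CFI jump-table entry that a C-level address-of would resolve to. The sketch below shows the same instruction pair in a userspace program; it assembles only for an AArch64 target, target() is a placeholder function invented for the example, and without CFI enabled the two printed addresses coincide.

/* pa_function.c - AArch64 only; build with: cc -o pa_function pa_function.c */
#include <stdio.h>

#define STRINGIFY_1(x)	#x
#define STRINGIFY(x)	STRINGIFY_1(x)

void target(void) { }

int main(void)
{
	unsigned long addr;

	/* adrp/add yield the symbol's real address; under CFI, the
	 * C-level 'target' below would instead point at the CFI
	 * jump-table entry for target(). */
	__asm__("adrp %0, " STRINGIFY(target) "\n\t"
		"add %0, %0, :lo12:" STRINGIFY(target) : "=r" (addr));

	printf("C address-of = %#lx\n", (unsigned long)target);
	printf("adrp/add     = %#lx\n", addr);
	return 0;
}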