Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/entry/vdso/vma.c              |  1
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c  |  5
-rw-r--r--  arch/x86/include/asm/vsyscall.h        |  2
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c    |  4
-rw-r--r--  arch/x86/mm/init.c                     |  2
-rw-r--r--  arch/x86/mm/kaiser.c                   | 34
-rw-r--r--  arch/x86/mm/kasan_init_64.c            | 10
7 files changed, 51 insertions, 7 deletions
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index aa828191c654..b8f69e264ac4 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -12,6 +12,7 @@
 #include <linux/random.h>
 #include <linux/elf.h>
 #include <linux/cpu.h>
+#include <asm/pvclock.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 174c2549939d..112178b401a1 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -66,6 +66,11 @@ static int __init vsyscall_setup(char *str)
 }
 early_param("vsyscall", vsyscall_setup);
 
+bool vsyscall_enabled(void)
+{
+	return vsyscall_mode != NONE;
+}
+
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
 			      const char *message)
 {
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 6ba66ee79710..4865e10dbb55 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -12,12 +12,14 @@ extern void map_vsyscall(void);
  * Returns true if handled.
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+extern bool vsyscall_enabled(void);
 #else
 static inline void map_vsyscall(void) {}
 static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
 	return false;
 }
+static inline bool vsyscall_enabled(void) { return false; }
 #endif
 
 #endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 2233f8a76615..2a0f44d225fe 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -580,6 +580,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
 	switch (family) {
 	case 0x14:
@@ -591,6 +592,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 	case 0x16:
 		max_size = F16H_MPB_MAX_SIZE;
 		break;
+	case 0x17:
+		max_size = F17H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 2bd45ae91eb3..151fd33e9043 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -757,7 +757,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.state = 0,
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
-EXPORT_SYMBOL_GPL(cpu_tlbstate);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index b0b3a69f1c7f..6a7a77929a8c 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -20,6 +20,7 @@
 #include <asm/pgalloc.h>
 #include <asm/desc.h>
 #include <asm/cmdline.h>
+#include <asm/vsyscall.h>
 
 int kaiser_enabled __read_mostly = 1;
 EXPORT_SYMBOL(kaiser_enabled);	/* for inlined TLB flush functions */
@@ -111,12 +112,13 @@ static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *kaiser_pagetable_walk(unsigned long address)
+static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
 {
 	pmd_t *pmd;
 	pud_t *pud;
 	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	unsigned long prot = _KERNPG_TABLE;
 
 	if (pgd_none(*pgd)) {
 		WARN_ONCE(1, "All shadow pgds should have been populated");
@@ -124,6 +126,17 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
 	}
 	BUILD_BUG_ON(pgd_large(*pgd) != 0);
 
+	if (user) {
+		/*
+		 * The vsyscall page is the only page that will have
+		 * _PAGE_USER set. Catch everything else.
+		 */
+		BUG_ON(address != VSYSCALL_ADDR);
+
+		set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+		prot = _PAGE_TABLE;
+	}
+
 	pud = pud_offset(pgd, address);
 	/* The shadow page tables do not use large mappings: */
 	if (pud_large(*pud)) {
@@ -136,7 +149,7 @@
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pud_none(*pud)) {
-			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+			set_pud(pud, __pud(prot | __pa(new_pmd_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pmd_page), NR_KAISERTABLE);
 		} else
@@ -156,7 +169,7 @@
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pmd_none(*pmd)) {
-			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pte_page), NR_KAISERTABLE);
 		} else
@@ -192,7 +205,7 @@ static int kaiser_add_user_map(const void *__start_addr, unsigned long size,
 			ret = -EIO;
 			break;
 		}
-		pte = kaiser_pagetable_walk(address);
+		pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
 		if (!pte) {
 			ret = -ENOMEM;
 			break;
@@ -319,6 +332,19 @@ void __init kaiser_init(void)
 
 	kaiser_init_all_pgds();
 
+	/*
+	 * Note that this sets _PAGE_USER and it needs to happen when the
+	 * pagetable hierarchy gets created, i.e., early. Otherwise
+	 * kaiser_pagetable_walk() will encounter initialized PTEs in the
+	 * hierarchy and not set the proper permissions, leading to the
+	 * pagefaults with page-protection violations when trying to read the
+	 * vsyscall page. For example.
+	 */
+	if (vsyscall_enabled())
+		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
+					  PAGE_SIZE,
+					  __PAGE_KERNEL_VSYSCALL);
+
 	for_each_possible_cpu(cpu) {
 		void *percpu_vaddr = __per_cpu_user_mapped_start +
 				     per_cpu_offset(cpu);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 81ec7c02f968..fdfa25c83119 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -126,10 +126,16 @@ void __init kasan_init(void)
 
 	/*
 	 * kasan_zero_page has been used as early shadow memory, thus it may
-	 * contain some garbage. Now we can clear it, since after the TLB flush
-	 * no one should write to it.
+	 * contain some garbage. Now we can clear and write protect it, since
+	 * after the TLB flush no one should write to it.
 	 */
 	memset(kasan_zero_page, 0, PAGE_SIZE);
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+		set_pte(&kasan_zero_pte[i], pte);
+	}
+	/* Flush TLBs again to be sure that write protection applied. */
+	__flush_tlb_all();
 
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");