Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/arm64/Makefile | 19
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210.dtsi | 2
-rw-r--r--  arch/arm64/configs/cuttlefish_defconfig | 7
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/arm64/include/asm/assembler.h | 9
-rw-r--r--  arch/arm64/include/asm/brk-imm.h | 2
-rw-r--r--  arch/arm64/include/asm/cache.h | 4
-rw-r--r--  arch/arm64/include/asm/compat.h | 1
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 7
-rw-r--r--  arch/arm64/include/asm/efi.h | 6
-rw-r--r--  arch/arm64/include/asm/futex.h | 4
-rw-r--r--  arch/arm64/include/asm/insn.h | 8
-rw-r--r--  arch/arm64/include/asm/kasan.h | 25
-rw-r--r--  arch/arm64/include/asm/memory.h | 43
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 7
-rw-r--r--  arch/arm64/include/asm/processor.h | 8
-rw-r--r--  arch/arm64/include/asm/string.h | 14
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 7
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 4
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h | 1
-rw-r--r--  arch/arm64/kernel/acpi.c | 10
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 7
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 1
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 8
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 1
-rw-r--r--  arch/arm64/kernel/ftrace.c | 21
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 7
-rw-r--r--  arch/arm64/kernel/image.h | 6
-rw-r--r--  arch/arm64/kernel/insn.c | 40
-rw-r--r--  arch/arm64/kernel/module.c | 8
-rw-r--r--  arch/arm64/kernel/return_address.c | 3
-rw-r--r--  arch/arm64/kernel/setup.c | 8
-rw-r--r--  arch/arm64/kernel/smp.c | 5
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 3
-rw-r--r--  arch/arm64/kernel/traps.c | 60
-rw-r--r--  arch/arm64/kernel/vdso.c | 3
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S | 7
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c | 35
-rw-r--r--  arch/arm64/lib/memchr.S | 2
-rw-r--r--  arch/arm64/lib/memcmp.S | 2
-rw-r--r--  arch/arm64/lib/strchr.S | 2
-rw-r--r--  arch/arm64/lib/strcmp.S | 2
-rw-r--r--  arch/arm64/lib/strlen.S | 2
-rw-r--r--  arch/arm64/lib/strncmp.S | 2
-rw-r--r--  arch/arm64/lib/strnlen.S | 2
-rw-r--r--  arch/arm64/lib/strrchr.S | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 10
-rw-r--r--  arch/arm64/mm/fault.c | 29
-rw-r--r--  arch/arm64/mm/kasan_init.c | 164
-rw-r--r--  arch/arm64/mm/mmu.c | 11
-rw-r--r--  arch/arm64/mm/proc.S | 43
-rw-r--r--  arch/arm64/net/bpf_jit.h | 8
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 31
63 files changed, 551 insertions, 209 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 71c5559443da..d469fccc7f31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -69,7 +69,8 @@ config ARM64
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 36b9c3ec9a30..3162e4f50ac5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -128,12 +128,23 @@ ifeq ($(cc-name),clang)
KBUILD_CFLAGS += $(call cc-disable-warning, asm-operand-widths)
endif
-# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
+ifeq ($(CONFIG_KASAN_SW_TAGS), y)
+KASAN_SHADOW_SCALE_SHIFT := 4
+else
+KASAN_SHADOW_SCALE_SHIFT := 3
+endif
+
+KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
- (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
- + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
- - (1 << (64 - 32 - 3)) )) )
+ (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+ + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
+ - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
export TEXT_OFFSET GZFLAGS
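
For reference, the shell arithmetic above can be checked with a few lines of C. This is a standalone sketch for the CONFIG_ARM64_VA_BITS=48, generic-KASAN (shift 3) case, not part of the patch; the shell only computes the upper 32 bits, and the lower 32 bits of the offset are always zero:

#include <stdio.h>

int main(void)
{
	unsigned long va_bits = 48;	/* CONFIG_ARM64_VA_BITS */
	unsigned long shift = 3;	/* 4 with CONFIG_KASAN_SW_TAGS */
	unsigned long hi = (0xffffffffUL & (-1UL << (va_bits - 32)))
			 + (1UL << (va_bits - 32 - shift))
			 - (1UL << (64 - 32 - shift));

	/* prints KASAN_SHADOW_OFFSET = 0xdfff200000000000 for this configuration */
	printf("KASAN_SHADOW_OFFSET = 0x%08lx00000000\n", hi & 0xffffffffUL);
	return 0;
}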
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 8c0cf7efac65..b554cdaf5e53 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -134,7 +134,7 @@
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x400>;
+ reg = <0x12000 0x200>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 30d48ecf46e0..27d2bd85d1ae 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -65,6 +65,23 @@
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * This area matches the mapping done with a
+ * mainline U-Boot, and should be updated by the
+ * bootloader.
+ */
+
+ psci-area@4000000 {
+ reg = <0x0 0x4000000 0x0 0x200000>;
+ no-map;
+ };
+ };
+
ap806 {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index d10d4430537a..f6e6f1e83ba8 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -307,7 +307,8 @@
regulator-max-microvolt = <1320000>;
enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
regulator-ramp-delay = <80>;
- regulator-enable-ramp-delay = <1000>;
+ regulator-enable-ramp-delay = <2000>;
+ regulator-settling-time-us = <160>;
};
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 9bdf19f2cca7..466199766848 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1103,7 +1103,7 @@
compatible = "nvidia,tegra210-agic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x702f9000 0x2000>,
+ reg = <0x702f9000 0x1000>,
<0x702fa000 0x2000>;
interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&tegra_car TEGRA210_CLK_APE>;
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 2c161483bd4a..642ca4c5bd56 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -69,6 +69,7 @@ CONFIG_PM_DEBUG=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -190,13 +191,16 @@ CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_CAN=y
+# CONFIG_CAN_BCM is not set
+# CONFIG_CAN_GW is not set
+CONFIG_CAN_VCAN=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
CONFIG_MAC80211=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_RFKILL=y
# CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_OF_UNITTEST=y
@@ -397,6 +401,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_COMMON_CLK_XGENE is not set
CONFIG_MAILBOX=y
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index c55d68ccb89f..52975817fdb6 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -307,6 +307,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
kernel_neon_begin();
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 656b959bcdaa..fb7a9caf51c8 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -62,7 +62,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
if (!may_use_simd())
return crypto_sha1_finup(desc, data, len, out);
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index ddda74844c43..9141184d89e4 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -67,7 +67,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
if (!may_use_simd()) {
if (len)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 02d73d83f0de..1f4612bfd2cb 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -23,6 +23,8 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
+#include <asm-generic/export.h>
+
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
@@ -458,6 +460,13 @@ alternative_endif
#else
#define NOKPROBE(x)
#endif
+
+#ifdef CONFIG_KASAN
+#define EXPORT_SYMBOL_NOKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
+#endif
+
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index ed693c5bcec0..2945fe6cd863 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -16,10 +16,12 @@
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
* 0x800: kernel-mode BUG() and WARN() traps
+ * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
+#define KASAN_BRK_IMM 0x900
#endif
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index e40f8a2df545..e2ace4b07840 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -46,6 +46,10 @@
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#ifdef CONFIG_KASAN_SW_TAGS
+#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index a3c7f271ad4c..9ed290a9811c 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -234,6 +234,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
}
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ 2048
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c5bc80a03515..5048c7a55eef 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -44,9 +44,10 @@
*/
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8389050328bb..558542086069 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -89,7 +89,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c7e30a6ed56e..232917e9c1d9 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -134,7 +134,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
: "memory");
uaccess_disable();
- *uval = val;
+ if (!ret)
+ *uval = val;
+
return ret;
}
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4214c38d016b..e3193fd39d8d 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -271,6 +271,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -383,6 +384,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index e266f80e45b7..b52aacd2c526 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -4,15 +4,20 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_KASAN
-
#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/pgtable-types.h>
+#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
+#define arch_kasan_reset_tag(addr) __tag_reset(addr)
+#define arch_kasan_get_tag(addr) __tag_get(addr)
+
+#ifdef CONFIG_KASAN
+
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
- * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
*/
#define KASAN_SHADOW_START (VA_START)
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
@@ -20,14 +25,16 @@
/*
* This value is used to map an address to the corresponding shadow
* address by the following formula:
- * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
*
- * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
- * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
- * should satisfy the following equation:
- * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
*/
-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
+#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
+ (64 - KASAN_SHADOW_SCALE_SHIFT)))
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir);
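
The comment block above fully determines the address-to-shadow mapping; as a minimal restatement of the formula in C (the real conversion is done by the generic KASAN helper kasan_mem_to_shadow(), this sketch only mirrors it):

/* shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
static inline void *mem_to_shadow_sketch(const void *addr)
{
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}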
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d4bae7d6e0d8..2592649304a3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -80,12 +80,12 @@
#define KERNEL_END _end
/*
- * KASAN requires 1/8th of the kernel virtual address space for the shadow
- * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
+ * address space for the shadow region respectively. They can bloat the stack
+ * significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_SHADOW_SIZE (0)
@@ -211,6 +211,26 @@ static inline unsigned long kaslr_offset(void)
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) \
+ ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_shifted(tag) ((u64)(tag) << 56)
+#define __tag_set(addr, tag) (__typeof__(addr))( \
+ ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
+#define __tag_reset(addr) untagged_addr(addr)
+#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
+#else
+#define __tag_set(addr, tag) (addr)
+#define __tag_reset(addr) (addr)
+#define __tag_get(addr) 0
+#endif
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -293,7 +313,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page) ({ \
+ unsigned long __addr = \
+ ((__page_to_voff(page)) | PAGE_OFFSET); \
+ __addr = __tag_set(__addr, page_kasan_tag(page)); \
+ ((void *)__addr); \
+})
+
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -301,9 +327,10 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif
#endif
-#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
- _virt_addr_valid(kaddr))
+#define _virt_addr_is_linear(kaddr) \
+ (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr) \
+ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
#include <asm-generic/memory_model.h>
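
A short usage sketch for the tag helpers introduced above (not part of the patch; the pointer value is made up and CONFIG_KASAN_SW_TAGS is assumed):

static void tag_helpers_example(void)
{
	void *p = (void *)0xffff000012345678UL;	/* hypothetical linear-map address */
	void *t = __tag_set(p, 0xab);		/* tag 0xab placed in bits 63:56 */

	WARN_ON(__tag_get(t) != 0xab);
	WARN_ON(untagged_addr(t) != p);		/* sign-extending from bit 55 strips the tag */
}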
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 8df4cb6ac6f7..48030234301f 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -275,6 +275,7 @@
#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
+#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aafea648a30f..4cf248185e6f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -394,8 +394,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
@@ -420,6 +420,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline void pte_unmap(pte_t *pte) { }
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -428,7 +430,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fda6f5812281..91bb97d8bdbf 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -49,7 +49,15 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#ifdef CONFIG_COMPAT
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
#define TASK_SIZE_32 UL(0x100000000)
+#else
+#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index dd95d33a5bd5..03a6c256b7ec 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -16,6 +16,7 @@
#ifndef __ASM_STRING_H
#define __ASM_STRING_H
+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c);
@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *, __kernel_size_t);
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+#endif
+
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);
-#define __HAVE_ARCH_MEMCHR
-extern void *memchr(const void *, int, __kernel_size_t);
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
extern void *__memset(void *, int, __kernel_size_t);
-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, size_t);
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fad8c1b2ca3e..286fb6c73765 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -97,13 +97,6 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
return addr;
}
-/*
- * When dealing with data aborts, watchpoints, or instruction traps we may end
- * up with a tagged userland pointer. Clear the tag to get a sane pointer to
- * pass on to access_ok(), for instance.
- */
-#define untagged_addr(addr) sign_extend64(addr, 55)
-
#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index a0baa9af5487..b52bd9696386 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -43,7 +43,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 398
+#define __NR_compat_syscalls 435
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index ef292160748c..b81507012f45 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -817,6 +817,10 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 397
__SYSCALL(__NR_statx, sys_statx)
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_pidfd_open 434
+__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..f89263c8e11a 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -38,6 +38,7 @@ struct vdso_data {
__u32 tz_minuteswest; /* Whacky timezone stuff */
__u32 tz_dsttime;
__u32 use_syscall;
+ __u32 hrtimer_res;
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index b3162715ed78..285f0b4851fc 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -157,10 +157,14 @@ static int __init acpi_fadt_sanity_check(void)
*/
if (table->revision < 5 ||
(table->revision == 5 && fadt->minor_revision < 1)) {
- pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
table->revision, fadt->minor_revision);
- ret = -EINVAL;
- goto out;
+
+ if (!fadt->arm_boot_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
}
if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 66be504edb6c..9eedf839e739 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user);
EXPORT_SYMBOL(memstart_addr);
/* string / mem functions */
+#ifndef CONFIG_KASAN
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+#endif
+
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
/* atomic bitops */
EXPORT_SYMBOL(set_bit);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b5e43b01b396..b4a0f4ab770a 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -95,7 +95,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index d16978213c5b..e2a9d04d0517 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
pr_err("%pOF: missing enable-method property\n",
dn);
}
+ of_node_put(dn);
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b008ad7307fd..1a4667e1b51d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -182,8 +182,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0), /* CWG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -415,6 +415,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
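
In isolation, the new FTR_HIGHER_OR_ZERO_SAFE rule amounts to the sketch below (an illustration, not the patched function itself, where ret is pre-initialised to the field's predefined safe value outside this hunk):

static s64 higher_or_zero_safe_sketch(s64 cur, s64 new, s64 safe_val)
{
	/* 0 means "not reported": fall back to the predefined safe value */
	if (!cur || !new)
		return safe_val;
	return new > cur ? new : cur;
}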
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index c7ef99904934..edb63bf2ac1c 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -133,6 +133,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
*/
static int clear_os_lock(unsigned int cpu)
{
+ write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 50986e388d2b..fac79d75d1d9 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
+ struct plt_entry trampoline, *dst;
struct module *mod;
/*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* is added in the future, but for now, the pr_err() below
* deals with a theoretical issue only.
*/
+ dst = mod->arch.ftrace_trampoline;
trampoline = get_plt_entry(addr);
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &trampoline)) {
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &(struct plt_entry){})) {
+ if (!plt_entries_equal(dst, &trampoline)) {
+ if (!plt_entries_equal(dst, &(struct plt_entry){})) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
/* point the trampoline to our ftrace entry point */
module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
+ *dst = trampoline;
module_enable_ro(mod, true);
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /*
+ * Ensure updated trampoline is visible to instruction
+ * fetch before we patch in the branch.
+ */
+ flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 749f81779420..95697a9c1245 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -548,13 +548,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 40f9f0b078a4..12af2ba8d558 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -73,7 +73,11 @@
#ifdef CONFIG_EFI
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 2718a77da165..4381aa7b071d 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -793,6 +793,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ u32 insn = aarch64_insn_get_ldadd_value();
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ /*
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
+ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+ value, size);
+}
+
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy,
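
A usage sketch for the new LDADD/STADD generators above (not part of the patch; the register and size enum values already exist in <asm/insn.h>). The generated word encodes "stadd x1, [x2]", i.e. an LSE atomic add of x1 into memory at x2 with the loaded result discarded:

u32 insn = aarch64_insn_gen_stadd(AARCH64_INSN_REG_2,	/* address register x2 */
				  AARCH64_INSN_REG_1,	/* value register x1 */
				  AARCH64_INSN_SIZE_64);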
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f469e0435903..73ecccc514e1 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,6 +32,7 @@
void *module_alloc(unsigned long size)
{
+ u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
gfp_t gfp_mask = GFP_KERNEL;
void *p;
@@ -39,9 +40,12 @@ void *module_alloc(unsigned long size)
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;
+ if (IS_ENABLED(CONFIG_KASAN))
+ /* don't exceed the static module region - see below */
+ module_alloc_end = MODULES_END;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + MODULES_VSIZE,
- gfp_mask, PAGE_KERNEL_EXEC, 0,
+ module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc0f654..0311fe52c8ff 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d4b740538ad5..0c0c53e3d652 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -259,6 +259,11 @@ void __init setup_arch(char **cmdline_p)
setup_machine_fdt(__fdt_pointer);
+ /*
+ * Initialise the static keys early as they may be enabled by the
+ * cpufeature code and early parameters.
+ */
+ jump_label_init();
parse_early_param();
/*
@@ -304,6 +309,9 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
+ /* Init percpu seeds for random tags after cpus are set up. */
+ kasan_init_tags();
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b7ad41d7b6ee..7110600f0ede 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -442,11 +442,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
- /*
- * Initialise the static keys early as they may be enabled by the
- * cpufeature code.
- */
- jump_label_init();
cpuinfo_store_boot_cpu();
save_boot_cpu_run_el();
/*
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d5718a060672..2ae7630d685b 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 74259ae9c7f2..6458aba01ac4 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -35,6 +35,7 @@
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
+#include <linux/kasan.h>
#include <asm/atomic.h>
#include <asm/bug.h>
@@ -778,6 +779,58 @@ static struct break_hook bug_break_hook = {
.fn = bug_handler,
};
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_ESR_RECOVER 0x20
+#define KASAN_ESR_WRITE 0x10
+#define KASAN_ESR_SIZE_MASK 0x0f
+#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
+
+static int kasan_handler(struct pt_regs *regs, unsigned int esr)
+{
+ bool recover = esr & KASAN_ESR_RECOVER;
+ bool write = esr & KASAN_ESR_WRITE;
+ size_t size = KASAN_ESR_SIZE(esr);
+ u64 addr = regs->regs[0];
+ u64 pc = regs->pc;
+
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
+ kasan_report(addr, size, write, pc);
+
+ /*
+ * The instrumentation allows to control whether we can proceed after
+ * a crash was detected. This is done by passing the -recover flag to
+ * the compiler. Disabling recovery allows to generate more compact
+ * code.
+ *
+ * Unfortunately disabling recovery doesn't work for the kernel right
+ * now. KASAN reporting is disabled in some contexts (for example when
+ * the allocator accesses slab object metadata; this is controlled by
+ * current->kasan_depth). All these accesses are detected by the tool,
+ * even though the reports for them are not printed.
+ *
+ * This is something that might be fixed at some point in the future.
+ */
+ if (!recover)
+ die("Oops - KASAN", regs, 0);
+
+ /* If thread survives, skip over the brk instruction and continue: */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
+#define KASAN_ESR_MASK 0xffffff00
+
+static struct break_hook kasan_break_hook = {
+ .esr_val = KASAN_ESR_VAL,
+ .esr_mask = KASAN_ESR_MASK,
+ .fn = kasan_handler,
+};
+#endif
+
/*
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
@@ -785,6 +838,10 @@ static struct break_hook bug_break_hook = {
int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+#ifdef CONFIG_KASAN_SW_TAGS
+ if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+ return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
@@ -792,4 +849,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
void __init trap_init(void)
{
register_break_hook(&bug_break_hook);
+#ifdef CONFIG_KASAN_SW_TAGS
+ register_break_hook(&kasan_break_hook);
+#endif
}
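
A decoding sketch for the ESR layout consumed by kasan_handler() above (standalone illustration using the masks defined in the hunk; the esr value is whatever the compiler encoded into the BRK immediate):

static void kasan_esr_decode_sketch(unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;	/* may execution continue past the BRK? */
	bool write = esr & KASAN_ESR_WRITE;	/* write access, else read */
	size_t size = KASAN_ESR_SIZE(esr);	/* 1 << (esr & 0xf) bytes */

	pr_info("KASAN brk: %s of %zu bytes, %srecoverable\n",
		write ? "write" : "read", size, recover ? "" : "non-");
}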
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d419006ad43..ec0bb588d755 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ /* Read without the seqlock held by clock_getres() */
+ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
+
if (!use_syscall) {
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a7b03c..21805e416483 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -301,13 +301,14 @@ ENTRY(__kernel_clock_getres)
ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
b.ne 1f
- ldr x2, 5f
+ adr vdso_data, _vdso_data
+ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
b 2f
1:
cmp w0, #CLOCK_REALTIME_COARSE
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 4f
- ldr x2, 6f
+ ldr x2, 5f
2:
cbz x1, 3f
stp xzr, x2, [x1]
@@ -321,8 +322,6 @@ ENTRY(__kernel_clock_getres)
svc #0
ret
5:
- .quad CLOCK_REALTIME_RES
-6:
.quad CLOCK_COARSE_RES
.cfi_endproc
ENDPROC(__kernel_clock_getres)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 13f81f971390..aef1d51395da 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
depends on OF
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
- select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 73464a96c365..db23c6e5c885 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,13 +15,18 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/tlbflush.h>
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+ unsigned long *flags)
{
u64 val;
+ local_irq_save(*flags);
+
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
@@ -36,7 +41,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
isb();
}
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+ unsigned long *flags)
{
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
@@ -47,7 +53,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+ unsigned long flags)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -55,9 +62,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ isb();
+ local_irq_restore(flags);
}
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+ unsigned long flags)
{
write_sysreg(0, vttbr_el2);
}
@@ -69,11 +79,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+ unsigned long flags;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
/*
* We could do so much better if we had the VA as well.
@@ -116,36 +128,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+ unsigned long flags;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ unsigned long flags;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 4444c1d25f4b..0f164a4baf52 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -30,7 +30,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(memchr)
+WEAK(memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
b.mi 2f
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index 2a4e239bd17a..fb295f52e9f8 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -58,7 +58,7 @@ pos .req x11
limit_wd .req x12
mask .req x13
-ENTRY(memcmp)
+WEAK(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index dae0cf5591f9..7c83091d1bcd 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-ENTRY(strchr)
+WEAK(strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
cmp w2, w1
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 471fe61760ef..7d5d15398bfb 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,7 +60,7 @@ tmp3 .req x9
zeroones .req x10
pos .req x11
-ENTRY(strcmp)
+WEAK(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 55ccc8e24c08..8e0b14205dcb 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -56,7 +56,7 @@ pos .req x12
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
-ENTRY(strlen)
+WEAK(strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index e267044761c6..66bd145935d9 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -64,7 +64,7 @@ limit_wd .req x13
mask .req x14
endloop .req x15
-ENTRY(strncmp)
+WEAK(strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
mov zeroones, #REP8_01
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index eae38da6e0bb..355be04441fe 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -59,7 +59,7 @@ limit_wd .req x14
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
-ENTRY(strnlen)
+WEAK(strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index 61eabd9a289a..f3b9f8e2917c 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -29,7 +29,7 @@
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
-ENTRY(strrchr)
+WEAK(strrchr)
mov x3, #0
and w1, w1, #0xff
1: ldrb w2, [x0], #1
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d2cbb1c98624..2c2eee348e9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -710,6 +710,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
+ if (!is_vmalloc_addr(cpu_addr)) {
+ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+ return __swiotlb_mmap_pfn(vma, pfn, size);
+ }
+
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
/*
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -733,6 +738,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct vm_struct *area = find_vm_area(cpu_addr);
+ if (!is_vmalloc_addr(cpu_addr)) {
+ struct page *page = virt_to_page(cpu_addr);
+ return __swiotlb_get_sgtable_page(sgt, page, size);
+ }
+
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
/*
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 465b90d7abf2..c8c233658bef 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -39,6 +39,7 @@
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -126,6 +127,18 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@@ -134,7 +147,7 @@ void show_pte(unsigned long addr)
struct mm_struct *mm;
pgd_t *pgd;
- if (addr < TASK_SIZE) {
+ if (is_ttbr0_addr(addr)) {
/* TTBR0 */
mm = current->active_mm;
if (mm == &init_mm) {
@@ -142,7 +155,7 @@ void show_pte(unsigned long addr)
addr);
return;
}
- } else if (addr >= VA_START) {
+ } else if (is_ttbr1_addr(addr)) {
/* TTBR1 */
mm = &init_mm;
} else {
@@ -242,7 +255,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+ if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -426,7 +439,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
+ if (is_ttbr0_addr(addr) && is_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -580,7 +593,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- if (addr < TASK_SIZE)
+ if (is_ttbr0_addr(addr))
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
@@ -766,7 +779,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
- if (addr > TASK_SIZE)
+ if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_irq_enable();
@@ -785,7 +798,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct task_struct *tsk = current;
if (user_mode(regs)) {
- if (instruction_pointer(regs) > TASK_SIZE)
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_irq_enable();
}
@@ -850,7 +863,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && pc > TASK_SIZE)
+ if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
if (!inf->fn(addr_if_watchpoint, esr, regs)) {
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 81f03959a4ab..9c6e0d4d69cc 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
@@ -35,77 +36,124 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
* with the physical address from __pa_symbol.
*/
-static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
- unsigned long end)
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
- pte_t *pte;
- unsigned long next;
+ void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_KASAN, node);
+ return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+ bool early)
+{
+ if (pmd_none(*pmd)) {
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte)
+ : kasan_alloc_zeroed_page(node);
+ __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pte_offset_kimg(pmd, addr)
+ : pte_offset_kernel(pmd, addr);
+}
- if (pmd_none(*pmd))
- __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
+static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+ bool early)
+{
+ if (pud_none(*pud)) {
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd)
+ : kasan_alloc_zeroed_page(node);
+ __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+}
+
+static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+ bool early)
+{
+ if (pgd_none(*pgd)) {
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud)
+ : kasan_alloc_zeroed_page(node);
+ __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
- pte = pte_offset_kimg(pmd, addr);
do {
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
- set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
- PAGE_KERNEL));
+ set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (pte++, addr = next, addr != end && pte_none(*pte));
}
-static void __init kasan_early_pmd_populate(pud_t *pud,
- unsigned long addr,
- unsigned long end)
+static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end, int node, bool early)
{
- pmd_t *pmd;
unsigned long next;
+ pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
- if (pud_none(*pud))
- __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
-
- pmd = pmd_offset_kimg(pud, addr);
do {
next = pmd_addr_end(addr, end);
- kasan_early_pte_populate(pmd, addr, next);
+ kasan_pte_populate(pmd, addr, next, node, early);
} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}
-static void __init kasan_early_pud_populate(pgd_t *pgd,
- unsigned long addr,
- unsigned long end)
+static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end, int node, bool early)
{
- pud_t *pud;
unsigned long next;
+ pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
- if (pgd_none(*pgd))
- __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
-
- pud = pud_offset_kimg(pgd, addr);
do {
next = pud_addr_end(addr, end);
- kasan_early_pmd_populate(pud, addr, next);
+ kasan_pmd_populate(pud, addr, next, node, early);
} while (pud++, addr = next, addr != end && pud_none(*pud));
}
-static void __init kasan_map_early_shadow(void)
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ int node, bool early)
{
- unsigned long addr = KASAN_SHADOW_START;
- unsigned long end = KASAN_SHADOW_END;
unsigned long next;
pgd_t *pgd;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- kasan_early_pud_populate(pgd, addr, next);
+ kasan_pud_populate(pgd, addr, next, node, early);
} while (pgd++, addr = next, addr != end);
}
+/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
- BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+ KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
- kasan_map_early_shadow();
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
+ true);
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+ int node)
+{
+ kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}
/*
@@ -142,15 +190,15 @@ void __init kasan_init(void)
struct memblock_region *reg;
int i;
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
- kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
+ kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
/*
* We are going to perform proper setup of shadow memory.
- * At first we should unmap early shadow (clear_pgds() call bellow).
+ * At first we should unmap early shadow (clear_pgds() call below).
* However, instrumented code couldn't execute without shadow memory.
* tmp_pg_dir used to keep early shadow mapped until full shadow
* setup will be finished.
@@ -161,28 +209,17 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(lm_alias(_text))));
-
- /*
- * vmemmap_populate() has populated the shadow region that covers the
- * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
- * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
- * kasan_populate_zero_shadow() from replacing the page table entries
- * (PMD or PTE) at the edges of the shadow region for the kernel
- * image.
- */
- kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
- kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+ kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
+ early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
- kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- (void *)mod_shadow_start);
- kasan_populate_zero_shadow((void *)kimg_shadow_end,
- kasan_mem_to_shadow((void *)PAGE_OFFSET));
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)mod_shadow_start);
+ kasan_populate_early_shadow((void *)kimg_shadow_end,
+ kasan_mem_to_shadow((void *)PAGE_OFFSET));
if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_zero_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ kasan_populate_early_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -191,20 +228,21 @@ void __init kasan_init(void)
if (start >= end)
break;
- vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
- (unsigned long)kasan_mem_to_shadow(end),
- pfn_to_nid(virt_to_pfn(start)));
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end),
+ early_pfn_to_nid(virt_to_pfn(start)));
}
/*
- * KAsan may reuse the contents of kasan_zero_pte directly, so we
- * should make sure that it maps the zero page read-only.
+ * KAsan may reuse the contents of kasan_early_shadow_pte directly,
+ * so we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_zero_pte[i],
- pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(sym_to_pfn(kasan_early_shadow_page),
+ PAGE_KERNEL_RO));
- memset(kasan_zero_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
/* At this point kasan is fully initialized. Enable error messages */
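A note on the BUILD_BUG_ON rewritten above: the shadow region is sized by KASAN_SHADOW_SCALE_SHIFT (3 for generic KASAN, one shadow byte per 8 bytes of memory; 4 for software tag-based KASAN, one byte per 16), so the shadow of the whole 64-bit address space occupies 1UL << (64 - shift) bytes ending at KASAN_SHADOW_END. A minimal sketch of the translation, assuming the <linux/kasan.h> constants and mirroring the generic kasan_mem_to_shadow() helper:

static inline void *mem_to_shadow_sketch(const void *addr)
{
	/* one shadow byte covers (1 << KASAN_SHADOW_SCALE_SHIFT) bytes */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
}

With that relation, KASAN_SHADOW_OFFSET must equal KASAN_SHADOW_END minus (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)), which is exactly what kasan_early_init() asserts after this change.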
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6ac0d32d60a5..abb9d2ecc675 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -899,13 +899,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
int __init arch_ioremap_pud_supported(void)
{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+ /*
+ * Only 4k granule supports level 1 block mappings.
+ * SW table walks can't handle removal of intermediate entries.
+ */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int __init arch_ioremap_pmd_supported(void)
{
- return 1;
+ /* See arch_ioremap_pud_supported() */
+ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 10c835f13f62..dcb170e43bd4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -41,6 +41,12 @@
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_KASAN_SW_TAGS
+#define TCR_KASAN_FLAGS TCR_TBI1
+#else
+#define TCR_KASAN_FLAGS 0
+#endif
+
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
@@ -64,24 +70,25 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
- mrs x5, cpacr_el1
- mrs x6, tcr_el1
- mrs x7, vbar_el1
- mrs x8, mdscr_el1
- mrs x9, oslsr_el1
- mrs x10, sctlr_el1
+ mrs x5, osdlr_el1
+ mrs x6, cpacr_el1
+ mrs x7, tcr_el1
+ mrs x8, vbar_el1
+ mrs x9, mdscr_el1
+ mrs x10, oslsr_el1
+ mrs x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x11, tpidr_el1
+ mrs x12, tpidr_el1
alternative_else
- mrs x11, tpidr_el2
+ mrs x12, tpidr_el2
alternative_endif
- mrs x12, sp_el0
+ mrs x13, sp_el0
stp x2, x3, [x0]
- stp x4, xzr, [x0, #16]
- stp x5, x6, [x0, #32]
- stp x7, x8, [x0, #48]
- stp x9, x10, [x0, #64]
- stp x11, x12, [x0, #80]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ stp x12, x13, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -104,8 +111,8 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
- mrs x5, tcr_el1
- bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ mrs x7, tcr_el1
+ bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
@@ -129,6 +136,7 @@ alternative_endif
/*
* Restore oslsr_el1 by writing oslar_el1
*/
+ msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
@@ -429,7 +437,8 @@ ENTRY(__cpu_setup)
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 | \
+ TCR_KASAN_FLAGS
tcr_set_idmap_t0sz x10, x9
/*
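The TCR_KASAN_FLAGS addition above sets TCR_EL1.TBI1 when CONFIG_KASAN_SW_TAGS is enabled: with top-byte-ignore active for TTBR1 addresses, the MMU disregards bits 63:56 of kernel pointers, which is what lets software tag-based KASAN keep an 8-bit tag in the top byte and still dereference the pointer unchanged. A hedged illustration (tag_pointer_sketch is a hypothetical helper written for this note, not the kernel's own API):

#define TAG_SHIFT	56

static inline void *tag_pointer_sketch(void *addr, unsigned char tag)
{
	unsigned long v = (unsigned long)addr;

	v &= ~(0xffUL << TAG_SHIFT);		/* clear the current top byte */
	v |= (unsigned long)tag << TAG_SHIFT;	/* place the KASAN tag */
	return (void *)v;
}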
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 783de51a6c4e..76606e87233f 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,11 +100,9 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
- aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
- AARCH64_INSN_PRFM_TARGET_##target, \
- AARCH64_INSN_PRFM_POLICY_##policy)
+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+ aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
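The A64_STADD macro introduced above wraps aarch64_insn_gen_stadd(), i.e. the ARMv8.1 LSE STADD instruction, an atomic add to memory that discards the old value. The bpf_jit_comp.c hunk below uses it for BPF_STX | BPF_XADD when ARM64_HAS_LSE_ATOMICS is present and keeps the LDXR/ADD/STXR retry loop as the fallback; both paths implement the same semantics, sketched here with a compiler builtin for illustration only:

#include <stdint.h>

static inline void xadd_semantics_sketch(uint64_t *dst, uint64_t src)
{
	/* atomic *dst += src, result discarded (what STADD provides) */
	(void)__atomic_fetch_add(dst, src, __ATOMIC_RELAXED);
}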
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index be155f70f108..1bbb457c293f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,6 @@
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
@@ -330,7 +328,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
const bool isdw = BPF_SIZE(code) == BPF_DW;
- u8 jmp_cond;
+ u8 jmp_cond, reg;
s32 jmp_offset;
#define check_imm(bits, imm) do { \
@@ -706,19 +704,28 @@ emit_cond_jmp:
break;
}
break;
+
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
/* STX XADD: lock *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
- emit(A64_LDXR(isdw, tmp2, tmp), ctx);
- emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
- jmp_offset = -3;
- check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+ emit(A64_STADD(isdw, reg, src), ctx);
+ } else {
+ emit(A64_LDXR(isdw, tmp2, reg), ctx);
+ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ }
break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */