From 8ce837cee8f51fb0eacb32c85461ea2f0fafc9f8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Oct 2014 15:42:07 +0200 Subject: arm64/mm: add create_pgd_mapping() to create private page tables For UEFI, we need to install the memory mappings used for Runtime Services in a dedicated set of page tables. Add create_pgd_mapping(), which allows us to allocate and install those page table entries early. Reviewed-by: Will Deacon Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/mmu.h | 3 +++ arch/arm64/include/asm/pgtable.h | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index c2f006c48bd..5fd40c43be8 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -33,5 +33,8 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); extern void init_mem_pgprot(void); /* create an identity mapping for memory (or io if map_io is true) */ extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io); +extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot); #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 210d632aa5a..59079248529 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -264,6 +264,11 @@ static inline pmd_t pte_pmd(pte_t pte) return __pmd(pte_val(pte)); } +static inline pgprot_t mk_sect_prot(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT); +} + /* * THP definitions. */ -- cgit v1.2.3 From 1bd0abb0c924a8b28c6466cdd6bb34ea053541dc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 17 Nov 2014 13:50:21 +0100 Subject: arm64/efi: set EFI_ALLOC_ALIGN to 64 KB Set EFI_ALLOC_ALIGN to 64 KB so that all allocations done by the stub are naturally compatible with a 64 KB granule kernel. Acked-by: Leif Lindholm Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/efi.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index a34fd3b12e2..71291253114 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -44,4 +44,6 @@ extern void efi_idmap_init(void); #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) +#define EFI_ALLOC_ALIGN SZ_64K + #endif /* _ASM_EFI_H */ -- cgit v1.2.3 From f3cdfd239da56a4cea75a2920dc326f0f45f67e3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Oct 2014 16:27:26 +0200 Subject: arm64/efi: move SetVirtualAddressMap() to UEFI stub In order to support kexec, the kernel needs to be able to deal with the state of the UEFI firmware after SetVirtualAddressMap() has been called. To avoid having separate code paths for non-kexec and kexec, let's move the call to SetVirtualAddressMap() to the stub: this will guarantee us that it will only be called once (since the stub is not executed during kexec), and ensures that the UEFI state is identical between kexec and normal boot. This implies that the layout of the virtual mapping needs to be created by the stub as well. All regions are rounded up to a naturally aligned multiple of 64 KB (for compatibility with 64k pages kernels) and recorded in the UEFI memory map. 
The kernel proper reads those values and installs the mappings in a dedicated set of page tables that are swapped in during UEFI Runtime Services calls. Acked-by: Leif Lindholm Acked-by: Matt Fleming Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/efi.h | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 71291253114..effef3713c5 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -7,28 +7,36 @@ #ifdef CONFIG_EFI extern void efi_init(void); extern void efi_idmap_init(void); +extern void efi_virtmap_init(void); #else #define efi_init() #define efi_idmap_init() +#define efi_virtmap_init() #endif #define efi_call_virt(f, ...) \ ({ \ - efi_##f##_t *__f = efi.systab->runtime->f; \ + efi_##f##_t *__f; \ efi_status_t __s; \ \ kernel_neon_begin(); \ + efi_virtmap_load(); \ + __f = efi.systab->runtime->f; \ __s = __f(__VA_ARGS__); \ + efi_virtmap_unload(); \ kernel_neon_end(); \ __s; \ }) #define __efi_call_virt(f, ...) \ ({ \ - efi_##f##_t *__f = efi.systab->runtime->f; \ + efi_##f##_t *__f; \ \ kernel_neon_begin(); \ + efi_virtmap_load(); \ + __f = efi.systab->runtime->f; \ __f(__VA_ARGS__); \ + efi_virtmap_unload(); \ kernel_neon_end(); \ }) @@ -46,4 +54,26 @@ extern void efi_idmap_init(void); #define EFI_ALLOC_ALIGN SZ_64K +/* + * On ARM systems, virtually remapped UEFI runtime services are set up in three + * distinct stages: + * - The stub retrieves the final version of the memory map from UEFI, populates + * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime + * service to communicate the new mapping to the firmware (Note that the new + * mapping is not live at this time) + * - During early boot, the page tables are allocated and populated based on the + * virt_addr fields in the memory map, but only if all descriptors with the + * EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this + * succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings + * have been installed successfully. + * - During an early initcall(), the UEFI Runtime Services are enabled and the + * EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a + * non-early mapping of the UEFI system table, and we need to have the virtmap + * installed. + */ +#define EFI_VIRTMAP EFI_ARCH_1 + +void efi_virtmap_load(void); +void efi_virtmap_unload(void); + #endif /* _ASM_EFI_H */ -- cgit v1.2.3 From 9679be103108926cfe9e6fd2f6829cefa77e47b0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Oct 2014 16:41:38 +0200 Subject: arm64/efi: remove idmap manipulations from UEFI code Now that we have moved the call to SetVirtualAddressMap() to the stub, UEFI has no use for the ID map, so we can drop the code that installs ID mappings for UEFI memory regions. 
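Both of the patches above hinge on the efi_virtmap_load()/efi_virtmap_unload() hooks invoked from efi_call_virt(); they are only declared in these headers, and their definitions live outside arch/arm64/include. A minimal sketch of what the pair might look like, assuming the runtime mappings are kept in a dedicated efi_mm (both the efi_mm name and the use of switch_mm() are illustrative assumptions, not part of this diff):

	/* sketch only -- not the actual kernel implementation */
	static void efi_virtmap_load(void)
	{
		/* keep TTBR0 stable while it points at the EFI page tables */
		preempt_disable();
		switch_mm(current->active_mm, &efi_mm, NULL);
	}

	static void efi_virtmap_unload(void)
	{
		switch_mm(&efi_mm, current->active_mm, NULL);
		preempt_enable();
	}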
Acked-by: Leif Lindholm Acked-by: Will Deacon Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/efi.h | 2 -- arch/arm64/include/asm/mmu.h | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index effef3713c5..7baf2cc04e1 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -6,11 +6,9 @@ #ifdef CONFIG_EFI extern void efi_init(void); -extern void efi_idmap_init(void); extern void efi_virtmap_init(void); #else #define efi_init() -#define efi_idmap_init() #define efi_virtmap_init() #endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 5fd40c43be8..3d311761e3c 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -31,8 +31,6 @@ extern void paging_init(void); extern void setup_mm_for_reboot(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); extern void init_mem_pgprot(void); -/* create an identity mapping for memory (or io if map_io is true) */ -extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io); extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot); -- cgit v1.2.3 From 5d425c18653731af62831d30a4fa023d532657a9 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 8 Jan 2015 10:42:34 +0000 Subject: arm64: kernel: add support for cpu cache information This patch adds support for cacheinfo on ARM64. On ARMv8, the cache hierarchy can be identified through the Cache Level ID (CLIDR) register, while the cache geometry is provided by the Cache Size ID (CCSIDR) register. Since the architecture doesn't provide any way of detecting the CPUs sharing a particular cache, the device tree is used for that purpose.
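As a worked example of the CCSIDR_EL1 decoding implemented by the helpers in the hunk below (cache_get_ccsidr() and the CACHE_* macros are the ones this patch adds; the total-size computation is illustrative):

	u64 ccsidr = cache_get_ccsidr(1L);		/* CSSELR = 1: L1 instruction cache */
	unsigned int line = CACHE_LINESIZE(ccsidr);	/* 16 << LineSize, in bytes */
	unsigned int sets = CACHE_NUMSETS(ccsidr);	/* NumSets field + 1 */
	unsigned int ways = CACHE_ASSOCIATIVITY(ccsidr);	/* Associativity field + 1 */
	unsigned int size = line * sets * ways;		/* total cache size in bytes */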
Signed-off-by: Sudeep Holla Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cachetype.h | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h index 4c631a0a360..da2fc9e3ced 100644 --- a/arch/arm64/include/asm/cachetype.h +++ b/arch/arm64/include/asm/cachetype.h @@ -39,24 +39,41 @@ extern unsigned long __icache_flags; +/* + * NumSets, bits[27:13] - (Number of sets in cache) - 1 + * Associativity, bits[12:3] - (Associativity of cache) - 1 + * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2 + */ +#define CCSIDR_EL1_WRITE_THROUGH BIT(31) +#define CCSIDR_EL1_WRITE_BACK BIT(30) +#define CCSIDR_EL1_READ_ALLOCATE BIT(29) +#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28) #define CCSIDR_EL1_LINESIZE_MASK 0x7 #define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK) - +#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3 +#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff +#define CCSIDR_EL1_ASSOCIATIVITY(x) \ + (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK) #define CCSIDR_EL1_NUMSETS_SHIFT 13 -#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT) +#define CCSIDR_EL1_NUMSETS_MASK 0x7fff #define CCSIDR_EL1_NUMSETS(x) \ - (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT) + (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK) + +#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x)) +#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1) +#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1) -extern u64 __attribute_const__ icache_get_ccsidr(void); +extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr); +/* Helpers for Level 1 Instruction cache csselr = 1L */ static inline int icache_get_linesize(void) { - return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr()); + return CACHE_LINESIZE(cache_get_ccsidr(1L)); } static inline int icache_get_numsets(void) { - return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr()); + return CACHE_NUMSETS(cache_get_ccsidr(1L)); } /* -- cgit v1.2.3 From cf99a48dce66b126391bb33c7709892d3d8002d7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 24 Nov 2014 12:03:32 +0000 Subject: arm64: introduce common ESR_ELx_* definitions Currently we have separate ESR_EL{1,2}_* macros, despite the fact that the encodings are common. While encodings are architected to refer to the current EL or a lower EL, the macros refer to particular ELs (e.g. ESR_EL1_EC_DABT_EL0). Having these duplicate definitions is redundant, and their naming is misleading. This patch introduces common ESR_ELx_* macros that can be used in all cases, in preparation for later patches which will migrate existing users over. Some additional cleanups are made in the process: * Suffixes for particular exception levels (e.g. _EL0, _EL1) are replaced with more general _LOW and _CUR suffixes, matching the architectural intent. * ESR_ELx_EC_WFx, rather than ESR_ELx_EC_WFI is introduced, as this EC encoding covers traps from both WFE and WFI. Similarly, ESR_ELx_WFx_ISS_WFE rather than ESR_ELx_EC_WFI_ISS_WFE is introduced. * Multi-bit fields are given consistently named _SHIFT and _MASK macros. * UL() is used for compatibility with assembly files. * Comments are added for currently unallocated ESR_ELx.EC encodings. For fields other than ESR_ELx.EC, macros are only implemented for fields for which there is already an ESR_EL{1,2}_* macro.
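As a short example of the kind of decoding the common macros below enable (the helper name and its u32 parameter are illustrative, not part of the patch; the cache-maintenance (CM) corner case is ignored for brevity):

	static bool esr_is_el0_write_fault(u32 esr)
	{
		unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;

		/* data abort from a lower EL, with the write-not-read bit set */
		return ec == ESR_ELx_EC_DABT_LOW && (esr & ESR_ELx_WNR);
	}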
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Christoffer Dall Cc: Marc Zyngier Cc: Peter Maydell Cc: Will Deacon --- arch/arm64/include/asm/esr.h | 79 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 72674f4c387..0fd1b0e15ea 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -54,4 +54,83 @@ #define ESR_EL1_EC_BKPT32 (0x38) #define ESR_EL1_EC_BRK64 (0x3C) +#define ESR_ELx_EC_UNKNOWN (0x00) +#define ESR_ELx_EC_WFx (0x01) +/* Unallocated EC: 0x02 */ +#define ESR_ELx_EC_CP15_32 (0x03) +#define ESR_ELx_EC_CP15_64 (0x04) +#define ESR_ELx_EC_CP14_MR (0x05) +#define ESR_ELx_EC_CP14_LS (0x06) +#define ESR_ELx_EC_FP_ASIMD (0x07) +#define ESR_ELx_EC_CP10_ID (0x08) +/* Unallocated EC: 0x09 - 0x0B */ +#define ESR_ELx_EC_CP14_64 (0x0C) +/* Unallocated EC: 0x0d */ +#define ESR_ELx_EC_ILL (0x0E) +/* Unallocated EC: 0x0F - 0x10 */ +#define ESR_ELx_EC_SVC32 (0x11) +#define ESR_ELx_EC_HVC32 (0x12) +#define ESR_ELx_EC_SMC32 (0x13) +/* Unallocated EC: 0x14 */ +#define ESR_ELx_EC_SVC64 (0x15) +#define ESR_ELx_EC_HVC64 (0x16) +#define ESR_ELx_EC_SMC64 (0x17) +#define ESR_ELx_EC_SYS64 (0x18) +/* Unallocated EC: 0x19 - 0x1E */ +#define ESR_ELx_EC_IMP_DEF (0x1f) +#define ESR_ELx_EC_IABT_LOW (0x20) +#define ESR_ELx_EC_IABT_CUR (0x21) +#define ESR_ELx_EC_PC_ALIGN (0x22) +/* Unallocated EC: 0x23 */ +#define ESR_ELx_EC_DABT_LOW (0x24) +#define ESR_ELx_EC_DABT_CUR (0x25) +#define ESR_ELx_EC_SP_ALIGN (0x26) +/* Unallocated EC: 0x27 */ +#define ESR_ELx_EC_FP_EXC32 (0x28) +/* Unallocated EC: 0x29 - 0x2B */ +#define ESR_ELx_EC_FP_EXC64 (0x2C) +/* Unallocated EC: 0x2D - 0x2E */ +#define ESR_ELx_EC_SERROR (0x2F) +#define ESR_ELx_EC_BREAKPT_LOW (0x30) +#define ESR_ELx_EC_BREAKPT_CUR (0x31) +#define ESR_ELx_EC_SOFTSTP_LOW (0x32) +#define ESR_ELx_EC_SOFTSTP_CUR (0x33) +#define ESR_ELx_EC_WATCHPT_LOW (0x34) +#define ESR_ELx_EC_WATCHPT_CUR (0x35) +/* Unallocated EC: 0x36 - 0x37 */ +#define ESR_ELx_EC_BKPT32 (0x38) +/* Unallocated EC: 0x39 */ +#define ESR_ELx_EC_VECTOR32 (0x3A) +/* Unallocted EC: 0x3B */ +#define ESR_ELx_EC_BRK64 (0x3C) +/* Unallocated EC: 0x3D - 0x3F */ +#define ESR_ELx_EC_MAX (0x3F) + +#define ESR_ELx_EC_SHIFT (26) +#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT) + +#define ESR_ELx_IL (UL(1) << 25) +#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) +#define ESR_ELx_ISV (UL(1) << 24) +#define ESR_ELx_SAS_SHIFT (22) +#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT) +#define ESR_ELx_SSE (UL(1) << 21) +#define ESR_ELx_SRT_SHIFT (16) +#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT) +#define ESR_ELx_SF (UL(1) << 15) +#define ESR_ELx_AR (UL(1) << 14) +#define ESR_ELx_EA (UL(1) << 9) +#define ESR_ELx_CM (UL(1) << 8) +#define ESR_ELx_S1PTW (UL(1) << 7) +#define ESR_ELx_WNR (UL(1) << 6) +#define ESR_ELx_FSC (0x3F) +#define ESR_ELx_FSC_TYPE (0x3C) +#define ESR_ELx_FSC_EXTABT (0x10) +#define ESR_ELx_FSC_FAULT (0x04) +#define ESR_ELx_FSC_PERM (0x0C) +#define ESR_ELx_CV (UL(1) << 24) +#define ESR_ELx_COND_SHIFT (20) +#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT) +#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) + #endif /* __ASM_ESR_H */ -- cgit v1.2.3 From 60a1f02c9e91e0796b54e83b14fb8a07f7a568b6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 18 Nov 2014 12:16:30 +0000 Subject: arm64: decode ESR_ELx.EC when reporting exceptions To aid the developer when something triggers an unexpected exception, decode the 
ESR_ELx.EC field when logging an ESR_ELx value. This doesn't tell the developer the specifics of the exception encoded in the remaining IL and ISS bits, but it can be helpful to distinguish between exception classes (e.g. SError and a data abort) without having to manually decode the field, which can be tiresome. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Christoffer Dall Cc: Marc Zyngier Cc: Peter Maydell Cc: Will Deacon --- arch/arm64/include/asm/esr.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 0fd1b0e15ea..c315543d50f 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -133,4 +133,10 @@ #define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT) #define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) +#ifndef __ASSEMBLY__ +#include + +const char *esr_get_class_string(u32 esr); +#endif /* __ASSEMBLY */ + #endif /* __ASM_ESR_H */ -- cgit v1.2.3 From c6d01a947a51193e839516165286bc8d14a0e409 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 24 Nov 2014 13:59:30 +0000 Subject: arm64: kvm: move to ESR_ELx macros Now that we have common ESR_ELx macros, make use of them in the arm64 KVM code. The addition of <asm/esr.h> to the include path highlighted badly ordered (i.e. not alphabetical) include lists; these are changed to alphabetical order. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Christoffer Dall Cc: Catalin Marinas Cc: Marc Zyngier Cc: Peter Maydell Cc: Will Deacon --- arch/arm64/include/asm/kvm_emulate.h | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 8127e45e263..5c56c0d2cef 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -23,8 +23,10 @@ #define __ARM64_KVM_EMULATE_H__ #include -#include + +#include #include +#include #include #include @@ -128,63 +130,63 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); } static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR); } static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); } static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { - return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; + return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA); } static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { - return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); + return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); } /* This one is not specific to Data Abort
*/ static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL); } static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { - return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; + return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT; } static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) { - return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; + return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { - return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; + return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; } static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) { - return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; + return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; } static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 4a939087bd02ce38135a3924ad3099685abe4c5f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 24 Nov 2014 14:03:52 +0000 Subject: arm64: remove ESR_EL1_* macros Now that all users have been moved over to the common ESR_ELx_* macros, remove the redundant ESR_EL1 macros. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Christoffer Dall Cc: Marc Zyngier Cc: Peter Maydell Cc: Will Deacon --- arch/arm64/include/asm/esr.h | 36 ------------------------------------ 1 file changed, 36 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index c315543d50f..62167090937 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -18,42 +18,6 @@ #ifndef __ASM_ESR_H #define __ASM_ESR_H -#define ESR_EL1_WRITE (1 << 6) -#define ESR_EL1_CM (1 << 8) -#define ESR_EL1_IL (1 << 25) - -#define ESR_EL1_EC_SHIFT (26) -#define ESR_EL1_EC_UNKNOWN (0x00) -#define ESR_EL1_EC_WFI (0x01) -#define ESR_EL1_EC_CP15_32 (0x03) -#define ESR_EL1_EC_CP15_64 (0x04) -#define ESR_EL1_EC_CP14_MR (0x05) -#define ESR_EL1_EC_CP14_LS (0x06) -#define ESR_EL1_EC_FP_ASIMD (0x07) -#define ESR_EL1_EC_CP10_ID (0x08) -#define ESR_EL1_EC_CP14_64 (0x0C) -#define ESR_EL1_EC_ILL_ISS (0x0E) -#define ESR_EL1_EC_SVC32 (0x11) -#define ESR_EL1_EC_SVC64 (0x15) -#define ESR_EL1_EC_SYS64 (0x18) -#define ESR_EL1_EC_IABT_EL0 (0x20) -#define ESR_EL1_EC_IABT_EL1 (0x21) -#define ESR_EL1_EC_PC_ALIGN (0x22) -#define ESR_EL1_EC_DABT_EL0 (0x24) -#define ESR_EL1_EC_DABT_EL1 (0x25) -#define ESR_EL1_EC_SP_ALIGN (0x26) -#define ESR_EL1_EC_FP_EXC32 (0x28) -#define ESR_EL1_EC_FP_EXC64 (0x2C) -#define ESR_EL1_EC_SERROR (0x2F) -#define ESR_EL1_EC_BREAKPT_EL0 (0x30) -#define ESR_EL1_EC_BREAKPT_EL1 (0x31) -#define ESR_EL1_EC_SOFTSTP_EL0 (0x32) -#define ESR_EL1_EC_SOFTSTP_EL1 (0x33) -#define ESR_EL1_EC_WATCHPT_EL0 (0x34) -#define ESR_EL1_EC_WATCHPT_EL1 (0x35) -#define ESR_EL1_EC_BKPT32 (0x38) -#define ESR_EL1_EC_BRK64 (0x3C) - #define ESR_ELx_EC_UNKNOWN (0x00) #define ESR_ELx_EC_WFx (0x01) /* Unallocated EC: 0x02 */ -- cgit v1.2.3 From 6e53031ed840fc6d6ad4d4e5778eed5a50183f23 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 24 Nov 2014 14:05:44 +0000 Subject: arm64: kvm: remove ESR_EL2_* macros Now that all users have been moved over to the common ESR_ELx_* macros, remove the redundant ESR_EL2 macros. 
To maintain compatibility with the fault handling code shared with 32-bit, the FSC_{FAULT,PERM} macros are retained as aliases for the common ESR_ELx_FSC_{FAULT,PERM} definitions. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Christoffer Dall Reviewed-by: Christoffer Dall Cc: Catalin Marinas Cc: Marc Zyngier Cc: Peter Maydell Cc: Will Deacon --- arch/arm64/include/asm/kvm_arm.h | 73 +++------------------------------------- 1 file changed, 4 insertions(+), 69 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 8afb863f5a9..94674eb7e7b 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -18,6 +18,7 @@ #ifndef __ARM64_KVM_ARM_H__ #define __ARM64_KVM_ARM_H__ +#include #include #include @@ -184,77 +185,11 @@ #define MDCR_EL2_TPMCR (1 << 5) #define MDCR_EL2_HPMN_MASK (0x1F) -/* Exception Syndrome Register (ESR) bits */ -#define ESR_EL2_EC_SHIFT (26) -#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT) -#define ESR_EL2_IL (UL(1) << 25) -#define ESR_EL2_ISS (ESR_EL2_IL - 1) -#define ESR_EL2_ISV_SHIFT (24) -#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT) -#define ESR_EL2_SAS_SHIFT (22) -#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT) -#define ESR_EL2_SSE (1 << 21) -#define ESR_EL2_SRT_SHIFT (16) -#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) -#define ESR_EL2_SF (1 << 15) -#define ESR_EL2_AR (1 << 14) -#define ESR_EL2_EA (1 << 9) -#define ESR_EL2_CM (1 << 8) -#define ESR_EL2_S1PTW (1 << 7) -#define ESR_EL2_WNR (1 << 6) -#define ESR_EL2_FSC (0x3f) -#define ESR_EL2_FSC_TYPE (0x3c) - -#define ESR_EL2_CV_SHIFT (24) -#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT) -#define ESR_EL2_COND_SHIFT (20) -#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT) - - -#define FSC_FAULT (0x04) -#define FSC_PERM (0x0c) +/* For compatibility with fault code shared with 32-bit */ +#define FSC_FAULT ESR_ELx_FSC_FAULT +#define FSC_PERM ESR_ELx_FSC_PERM /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) -#define ESR_EL2_EC_UNKNOWN (0x00) -#define ESR_EL2_EC_WFI (0x01) -#define ESR_EL2_EC_CP15_32 (0x03) -#define ESR_EL2_EC_CP15_64 (0x04) -#define ESR_EL2_EC_CP14_MR (0x05) -#define ESR_EL2_EC_CP14_LS (0x06) -#define ESR_EL2_EC_FP_ASIMD (0x07) -#define ESR_EL2_EC_CP10_ID (0x08) -#define ESR_EL2_EC_CP14_64 (0x0C) -#define ESR_EL2_EC_ILL_ISS (0x0E) -#define ESR_EL2_EC_SVC32 (0x11) -#define ESR_EL2_EC_HVC32 (0x12) -#define ESR_EL2_EC_SMC32 (0x13) -#define ESR_EL2_EC_SVC64 (0x15) -#define ESR_EL2_EC_HVC64 (0x16) -#define ESR_EL2_EC_SMC64 (0x17) -#define ESR_EL2_EC_SYS64 (0x18) -#define ESR_EL2_EC_IABT (0x20) -#define ESR_EL2_EC_IABT_HYP (0x21) -#define ESR_EL2_EC_PC_ALIGN (0x22) -#define ESR_EL2_EC_DABT (0x24) -#define ESR_EL2_EC_DABT_HYP (0x25) -#define ESR_EL2_EC_SP_ALIGN (0x26) -#define ESR_EL2_EC_FP_EXC32 (0x28) -#define ESR_EL2_EC_FP_EXC64 (0x2C) -#define ESR_EL2_EC_SERROR (0x2F) -#define ESR_EL2_EC_BREAKPT (0x30) -#define ESR_EL2_EC_BREAKPT_HYP (0x31) -#define ESR_EL2_EC_SOFTSTP (0x32) -#define ESR_EL2_EC_SOFTSTP_HYP (0x33) -#define ESR_EL2_EC_WATCHPT (0x34) -#define ESR_EL2_EC_WATCHPT_HYP (0x35) -#define ESR_EL2_EC_BKPT32 (0x38) -#define ESR_EL2_EC_VECTOR32 (0x3A) -#define ESR_EL2_EC_BRK64 (0x3C) - -#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 - -#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0) - #endif /* __ARM64_KVM_ARM_H__ */ -- cgit v1.2.3 From 2f896d5866107e2926dcdec34a7d40bc56dd2951 Mon Sep 17 00:00:00 2001 From: Laura 
Abbott Date: Thu, 22 Jan 2015 01:36:05 +0000 Subject: arm64: use fixmap for text patching When kernel text is marked as read only, it cannot be modified directly. Use a fixmap to modify the text instead in a similar manner to x86 and arm. Reviewed-by: Kees Cook Reviewed-by: Mark Rutland Tested-by: Kees Cook Tested-by: Mark Rutland Signed-off-by: Laura Abbott Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fixmap.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 9ef6eca905c..defa0ff9825 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -49,6 +49,7 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + FIX_TEXT_POKE0, __end_of_fixed_addresses }; -- cgit v1.2.3 From da141706aea52c1a9fbd28cb8d289b78819f5436 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 21 Jan 2015 17:36:06 -0800 Subject: arm64: add better page protections to arm64 Add page protections for arm64 similar to those in arm. This is for security reasons to prevent certain classes of exploits. The current method: - Map all memory as either RWX or RW. We round to the nearest section to avoid creating page tables before everything is mapped - Once everything is mapped, if either end of the RWX section should not be X, we split the PMD and remap as necessary - When initmem is to be freed, we change the permissions back to RW (using stop machine if necessary to flush the TLB) - If CONFIG_DEBUG_RODATA is set, the read only sections are set read only. Acked-by: Ard Biesheuvel Tested-by: Kees Cook Tested-by: Ard Biesheuvel Signed-off-by: Laura Abbott Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cacheflush.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 7ae31a2cc6c..67d309cc3b6 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); + +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +#endif + #endif -- cgit v1.2.3 From 60305db9884515ca063474e262b454f6da04e4e2 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 22 Jan 2015 10:01:40 +0000 Subject: arm64/efi: move virtmap init to early initcall Now that the create_mapping() code in mm/mmu.c is able to support setting up kernel page tables at initcall time, we can move the whole virtmap creation to arm64_enable_runtime_services() instead of having a distinct stage during early boot. This also allows us to drop the arm64-specific EFI_VIRTMAP flag. Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/efi.h | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 7baf2cc04e1..ef572206f1c 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -6,10 +6,8 @@ #ifdef CONFIG_EFI extern void efi_init(void); -extern void efi_virtmap_init(void); #else #define efi_init() -#define efi_virtmap_init() #endif #define efi_call_virt(f, ...) 
\ @@ -53,23 +51,17 @@ extern void efi_virtmap_init(void); #define EFI_ALLOC_ALIGN SZ_64K /* - * On ARM systems, virtually remapped UEFI runtime services are set up in three + * On ARM systems, virtually remapped UEFI runtime services are set up in two * distinct stages: * - The stub retrieves the final version of the memory map from UEFI, populates * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime * service to communicate the new mapping to the firmware (Note that the new * mapping is not live at this time) - * - During early boot, the page tables are allocated and populated based on the - * virt_addr fields in the memory map, but only if all descriptors with the - * EFI_MEMORY_RUNTIME attribute have a non-zero value for virt_addr. If this - * succeeds, the EFI_VIRTMAP flag is set to indicate that the virtual mappings - * have been installed successfully. - * - During an early initcall(), the UEFI Runtime Services are enabled and the - * EFI_RUNTIME_SERVICES bit set if some conditions are met, i.e., we need a - * non-early mapping of the UEFI system table, and we need to have the virtmap - * installed. + * - During an early initcall(), the EFI system table is permanently remapped + * and the virtual remapping of the UEFI Runtime Services regions is loaded + * into a private set of page tables. If this all succeeds, the Runtime + * Services are enabled and the EFI_RUNTIME_SERVICES bit set. */ -#define EFI_VIRTMAP EFI_ARCH_1 void efi_virtmap_load(void); void efi_virtmap_unload(void); -- cgit v1.2.3 From aa03c428e67881795f4190f9ffdb62fc14978608 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 22 Jan 2015 18:20:35 +0000 Subject: arm64: Fix overlapping VA allocations PCI IO space was intended to be 16MiB, at 32MiB below MODULES_VADDR, but commit d1e6dc91b532d3d3 ("arm64: Add architectural support for PCI") extended this to cover the full 32MiB. The final 8KiB of this 32MiB is also allocated for the fixmap, allowing for potential clashes between the two. This change was masked by assumptions in mem_init and the page table dumping code, which assumed the I/O space to be 16MiB long through separate hard-coded definitions. This patch changes the definition of the PCI I/O space allocation to live in asm/memory.h, along with the other VA space allocations. As the fixmap allocation depends on the number of fixmap entries, this is moved below the PCI I/O space allocation. Both the fixmap and PCI I/O space are guarded with 2MB of padding. Sites assuming the I/O space was 16MiB are moved over to use the new PCI_IO_{START,END} definitions, which will keep in sync with the size of the IO space (now restored to 16MiB). As a useful side effect, the use of the new PCI_IO_{START,END} definitions prevents a build issue in the dumping code due to a (now redundant) missing include of io.h for PCI_IOBASE.
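For reference, the layout that results from the definitions in the hunks below, walking down from the top of the kernel VA space:

	MODULES_END   = PAGE_OFFSET
	MODULES_VADDR = MODULES_END - 64 MiB	(module space)
	PCI_IO_END    = MODULES_VADDR - 2 MiB	(2 MiB guard)
	PCI_IO_START  = PCI_IO_END - 16 MiB	(PCI I/O space)
	FIXADDR_TOP   = PCI_IO_START - 2 MiB	(2 MiB guard; fixmap below)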
Signed-off-by: Mark Rutland Cc: Kees Cook Cc: Laura Abbott Cc: Liviu Dudau Cc: Steve Capper Cc: Will Deacon [catalin.marinas@arm.com: reorder FIXADDR and PCI_IO address_markers_idx enum] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/io.h | 5 +++-- arch/arm64/include/asm/memory.h | 10 +++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 949c406d4df..540f7c0aea8 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -145,8 +146,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * I/O port access primitives. */ #define arch_has_dev_port() (1) -#define IO_SPACE_LIMIT (SZ_32M - 1) -#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) +#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1) +#define PCI_IOBASE ((void __iomem *)PCI_IO_START) /* * String version of I/O memory access operations. diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 6486b2bfd56..f800d45ea22 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -32,6 +32,12 @@ */ #define UL(x) _AC(x, UL) +/* + * Size of the PCI I/O space. This must remain a power of two so that + * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses. + */ +#define PCI_IO_SIZE SZ_16M + /* * PAGE_OFFSET - the virtual address of the start of the kernel image (top * (VA_BITS - 1)) @@ -45,7 +51,9 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) -#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE) +#define PCI_IO_END (MODULES_VADDR - SZ_2M) +#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) +#define FIXADDR_TOP (PCI_IO_START - SZ_2M) #define TASK_SIZE_64 (UL(1) << VA_BITS) #ifdef CONFIG_COMPAT -- cgit v1.2.3 From 9d3bfbb4df58a8025b46b2676da2cf450385b754 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jan 2015 20:48:53 +0000 Subject: arm64: Combine coherent and non-coherent swiotlb dma_ops Since dev_archdata now has a dma_coherent state, combine the two coherent and non-coherent operations and remove their declaration, together with set_dma_ops, from the arch dma-mapping.h file. 
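With a single set of swiotlb dma_ops, each operation can branch on the per-device coherency state rather than there being two parallel sets of ops. A sketch of the idea (the __swiotlb_map_page name and the exact cache maintenance are assumptions for illustration; the real ops live in arch/arm64/mm/dma-mapping.c, outside this diff):

	static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
	{
		dma_addr_t dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);

		/* non-coherent devices need explicit cache maintenance */
		if (!is_device_dma_coherent(dev))
			__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
		return dev_addr;
	}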
Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/dma-mapping.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 9ce3e680ae1..6932bb57dba 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -28,8 +28,6 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0) extern struct dma_map_ops *dma_ops; -extern struct dma_map_ops coherent_swiotlb_dma_ops; -extern struct dma_map_ops noncoherent_swiotlb_dma_ops; static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { @@ -47,23 +45,18 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return __generic_dma_ops(dev); } -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - dev->archdata.dma_ops = ops; -} - static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, struct iommu_ops *iommu, bool coherent) { dev->archdata.dma_coherent = coherent; - if (coherent) - set_dma_ops(dev, &coherent_swiotlb_dma_ops); } #define arch_setup_dma_ops arch_setup_dma_ops /* do not use this function in a driver */ static inline bool is_device_dma_coherent(struct device *dev) { + if (!dev) + return false; return dev->archdata.dma_coherent; } -- cgit v1.2.3 From 04597a65c5efc207257a736d339c6f2f5b00250f Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Wed, 21 Jan 2015 12:43:09 +0000 Subject: arm64: Track system support for mixed endian EL0 This patch keeps track of the mixed endian EL0 support across the system and provides helper functions to export it. The status is a boolean indicating whether all the CPUs on the system support mixed endian at EL0. Signed-off-by: Suzuki K.
Poulose Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 2 ++ arch/arm64/include/asm/cputype.h | 14 ++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 07547ccc1f2..b6c16d5f622 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -52,6 +52,8 @@ static inline void cpus_set_cap(unsigned int num) } void check_local_cpu_errata(void); +bool cpu_supports_mixed_endian_el0(void); +bool system_supports_mixed_endian_el0(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 8adb986a308..ee6403df9fe 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -72,6 +72,15 @@ #define APM_CPU_PART_POTENZA 0x000 +#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 +#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT) +#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \ + (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT) +#define ID_AA64MMFR0_BIGEND_SHIFT 8 +#define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT) +#define ID_AA64MMFR0_BIGEND(mmfr0) \ + (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT) + #ifndef __ASSEMBLY__ /* @@ -104,6 +113,11 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void) return read_cpuid(CTR_EL0); } +static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) +{ + return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) || + (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1); +} #endif /* __ASSEMBLY__ */ #endif -- cgit v1.2.3 From 736d474f0fafd1486f178570bc47660ee9dfdef8 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Wed, 21 Jan 2015 12:43:10 +0000 Subject: arm64: Consolidate hotplug notifier for instruction emulation As of now each insn_emulation has a cpu hotplug notifier that enables/disables the CPU feature bit for the functionality. This patch re-arranges the code, such that there is only one notifier that runs through the list of registered emulation hooks and runs their corresponding set_hw_mode. We do nothing when a CPU is dying as we will set the appropriate bits as it comes back online based on the state of the hooks. Signed-off-by: Mark Rutland Signed-off-by: Suzuki K. Poulose Cc: Will Deacon Cc: Punit Agrawal [catalin.marinas@arm.com: fix pr_warn compilation error] [catalin.marinas@arm.com: remove unnecessary "insn" check] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ee6403df9fe..68732e9a02f 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -81,6 +81,8 @@ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT) +#define SCTLR_EL1_CP15BEN (0x1 << 5) + #ifndef __ASSEMBLY__ /* -- cgit v1.2.3 From 2d888f48e056119495847a269a435d5c3d9df349 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Wed, 21 Jan 2015 12:43:11 +0000 Subject: arm64: Emulate SETEND for AArch32 tasks Emulate the deprecated 'setend' instruction for AArch32 tasks. setend [le/be] - Sets the endianness of EL0 On systems with CPUs which support mixed endian at EL0, the hardware support for the instruction can be enabled by clearing the SCTLR_EL1.SED bit.
Like the other emulated instructions it is controlled by an entry in /proc/sys/abi/. For more information see : Documentation/arm64/legacy_instructions.txt The instruction is emulated by setting/clearing the SPSR_EL1.E bit, which will be reflected in the PSTATE.E in AArch32 context. This patch also restores the native endianness for the execution of signal handlers, since the process could have changed the endianness. Note: All CPUs on the system must have mixed endian support at EL0. Once the handler is registered, hotplugging a CPU which doesn't support mixed endian, could lead to unexpected results/behavior in applications. Signed-off-by: Suzuki K. Poulose Cc: Will Deacon Cc: Punit Agrawal Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 1 + arch/arm64/include/asm/ptrace.h | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 68732e9a02f..a84ec605bed 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -82,6 +82,7 @@ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT) #define SCTLR_EL1_CP15BEN (0x1 << 5) +#define SCTLR_EL1_SED (0x1 << 8) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41ed9e13795..d6dd9fdbc3b 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -58,6 +58,13 @@ #define COMPAT_PSR_Z_BIT 0x40000000 #define COMPAT_PSR_N_BIT 0x80000000 #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT +#else +#define COMPAT_PSR_ENDSTATE 0 +#endif + /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a * process is located in memory. -- cgit v1.2.3 From 0aaf0dae81b586134faeb52e28b7ad567629dd68 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Fri, 23 Jan 2015 05:36:42 +0000 Subject: smp, ARM64: Kill SMP single function call interrupt Commit 9a46ad6d6df3b54 "smp: make smp_call_function_many() use logic similar to smp_call_function_single()" has unified the way to handle single and multiple cross-CPU function calls. Now only one interrupt is needed for architecture specific code to support generic SMP function call interfaces, so kill the redundant single function call interrupt. Signed-off-by: Jiang Liu Acked-by: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: Arnd Bergmann Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hardirq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index e8a3268a891..6aae421f4d7 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -20,7 +20,7 @@ #include #include -#define NR_IPI 6 +#define NR_IPI 5 typedef struct { unsigned int __softirq_pending; -- cgit v1.2.3 From 33b36543df336d9158e1a763fe97251885f52c5c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 16 Jan 2015 13:52:14 +0000 Subject: arm64: uapi: expose our struct ucontext to the uapi headers arm64 defines its own ucontext structure which is incompatible with the struct defined (and exposed to userspace by) the asm-generic headers. 
glibc carries its own struct definition that is compatible with the arm64 definition, but we should expose our format in the uapi headers in case other libraries want to make use of the ucontext pushed as part of an arm64 sigframe. This patch moves the arm64 asm/ucontext.h to the uapi headers, along with the necessary #include of linux/types.h. Cc: Arnd Bergmann Cc: Marcus Shawcroft Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ucontext.h | 30 ------------------------------ arch/arm64/include/uapi/asm/Kbuild | 1 + arch/arm64/include/uapi/asm/ucontext.h | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 30 deletions(-) delete mode 100644 arch/arm64/include/asm/ucontext.h create mode 100644 arch/arm64/include/uapi/asm/ucontext.h (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h deleted file mode 100644 index 42e04c87742..00000000000 --- a/arch/arm64/include/asm/ucontext.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_UCONTEXT_H -#define __ASM_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - sigset_t uc_sigmask; - /* glibc uses a 1024-bit sigset_t */ - __u8 __unused[1024 / 8 - sizeof(sigset_t)]; - /* last for future expansion */ - struct sigcontext uc_mcontext; -}; - -#endif /* __ASM_UCONTEXT_H */ diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 942376d37d2..825b0fe51c2 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -18,4 +18,5 @@ header-y += siginfo.h header-y += signal.h header-y += stat.h header-y += statfs.h +header-y += ucontext.h header-y += unistd.h diff --git a/arch/arm64/include/uapi/asm/ucontext.h b/arch/arm64/include/uapi/asm/ucontext.h new file mode 100644 index 00000000000..791de8e89e3 --- /dev/null +++ b/arch/arm64/include/uapi/asm/ucontext.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ +#ifndef _UAPI__ASM_UCONTEXT_H +#define _UAPI__ASM_UCONTEXT_H + +#include <linux/types.h> + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + /* glibc uses a 1024-bit sigset_t */ + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; + /* last for future expansion */ + struct sigcontext uc_mcontext; +}; + +#endif /* _UAPI__ASM_UCONTEXT_H */ -- cgit v1.2.3 From 0156411b1828771c09c92f034e7b81f702b84f07 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 6 Jan 2015 16:42:32 +0000 Subject: arm64: Implement the compat_sys_call_table in C Unlike the sys_call_table[], the compat one was implemented in sys32.S making it impossible to notice discrepancies between the number of compat syscalls and the __NR_compat_syscalls macro, the latter having to be defined in asm/unistd.h as including asm/unistd32.h would cause conflicts on __NR_* definitions. With this patch, incorrect __NR_compat_syscalls values will result in a build-time error. Signed-off-by: Catalin Marinas Suggested-by: Mark Rutland Acked-by: Mark Rutland --- arch/arm64/include/asm/unistd.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index b780c6c76ee..159b44fff25 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -48,6 +48,9 @@ #endif #define __ARCH_WANT_SYS_CLONE + +#ifndef __COMPAT_SYSCALL_NR #include +#endif #define NR_syscalls (__NR_syscalls) -- cgit v1.2.3 From 9648606946b3b0c608846dddb30482b48a6f5c68 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 6 Jan 2015 17:01:56 +0000 Subject: arm64: Remove asm/syscalls.h This patch moves the sys_rt_sigreturn_wrapper prototype to arch/arm64/kernel/sys.c and removes the asm/syscalls.h header. Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/syscalls.h | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 arch/arm64/include/asm/syscalls.h (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h deleted file mode 100644 index 48fe7c600e9..00000000000 --- a/arch/arm64/include/asm/syscalls.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_SYSCALLS_H -#define __ASM_SYSCALLS_H - -#include -#include -#include - -/* - * System call wrappers implemented in kernel/entry.S. - */ -asmlinkage long sys_rt_sigreturn_wrapper(void); - -#include - -#endif /* __ASM_SYSCALLS_H */ -- cgit v1.2.3 From af3cfdbf56b91785650f54e7c9a899d814b4b9fb Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 26 Jan 2015 18:33:44 +0000 Subject: arm64: kernel: remove ARM64_CPU_SUSPEND config option ARM64_CPU_SUSPEND config option was introduced to make code providing context save/restore selectable only on platforms requiring power management capabilities.
Currently ARM64_CPU_SUSPEND depends on the PM_SLEEP config option which in turn is set by the SUSPEND config option. The introduction of CPU_IDLE for arm64 requires that code configured by ARM64_CPU_SUSPEND (context save/restore) should be compiled in in order to enable the CPU idle driver to rely on CPU operations carrying out context save/restore. The ARM64_CPUIDLE config option (ARM64 generic idle driver) is therefore forced to select ARM64_CPU_SUSPEND, even if there may be (ie PM_SLEEP) failed dependencies, which is not a clean way of handling the kernel configuration option. For these reasons, this patch removes the ARM64_CPU_SUSPEND config option and makes the context save/restore dependent on CPU_PM, which is selected whenever either SUSPEND or CPU_IDLE are configured, cleaning up dependencies in the process. This way, code previously configured through ARM64_CPU_SUSPEND is compiled in whenever a power management subsystem requires it to be present in the kernel (SUSPEND || CPU_IDLE), which is the behaviour expected on ARM64 kernels. The cpu_suspend and cpu_init_idle CPU operations are added only if CPU_IDLE is selected, since they are CPU_IDLE specific methods and should be grouped and defined accordingly. PSCI CPU operations are updated to reflect the introduced changes. Signed-off-by: Lorenzo Pieralisi Cc: Arnd Bergmann Cc: Will Deacon Cc: Krzysztof Kozlowski Cc: Daniel Lezcano Cc: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu_ops.h | 8 ++++---- arch/arm64/include/asm/cpuidle.h | 6 ++++++ arch/arm64/include/asm/suspend.h | 2 -- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 6f8e2ef9094..da301ee7395 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -28,8 +28,6 @@ struct device_node; * enable-method property. * @cpu_init: Reads any data necessary for a specific enable-method from the * devicetree, for a given cpu node and proposed logical id. - * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from - * devicetree, for a given cpu node and proposed logical id. * @cpu_prepare: Early one-time preparation step for a cpu. If there is a * mechanism for doing so, tests whether it is possible to boot * the given CPU. @@ -42,6 +40,8 @@ struct device_node; * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. + * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from + * devicetree, for a given cpu node and proposed logical id. * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing * to wrong parameters or error conditions. Called from the * CPU being suspended. Must be called with IRQs disabled. 
@@ -49,7 +49,6 @@ struct device_node; struct cpu_operations { const char *name; int (*cpu_init)(struct device_node *, unsigned int); - int (*cpu_init_idle)(struct device_node *, unsigned int); int (*cpu_prepare)(unsigned int); int (*cpu_boot)(unsigned int); void (*cpu_postboot)(void); @@ -58,7 +57,8 @@ struct cpu_operations { void (*cpu_die)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu); #endif -#ifdef CONFIG_ARM64_CPU_SUSPEND +#ifdef CONFIG_CPU_IDLE + int (*cpu_init_idle)(struct device_node *, unsigned int); int (*cpu_suspend)(unsigned long); #endif }; diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index b52a9932e2b..0710654631e 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -3,11 +3,17 @@ #ifdef CONFIG_CPU_IDLE extern int cpu_init_idle(unsigned int cpu); +extern int cpu_suspend(unsigned long arg); #else static inline int cpu_init_idle(unsigned int cpu) { return -EOPNOTSUPP; } + +static inline int cpu_suspend(unsigned long arg) +{ + return -EOPNOTSUPP; +} #endif #endif diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 456d67c1f0f..003802f5896 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -23,6 +23,4 @@ struct sleep_save_sp { extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); -extern int cpu_suspend(unsigned long); - #endif -- cgit v1.2.3 From 523d6e9fae9333a0e2a7baf4d11c8bcca544790e Mon Sep 17 00:00:00 2001 From: "zhichang.yuan" Date: Tue, 9 Dec 2014 07:26:47 +0000 Subject: arm64:mm: free the useless initial page table For 64K page system, after mapping a PMD section, the corresponding initial page table is not needed any more. That page can be freed. Signed-off-by: Zhichang Yuan [catalin.marinas@arm.com: added BUG_ON() to catch late memblock freeing] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 59079248529..67f6ede3947 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -342,9 +342,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #ifdef CONFIG_ARM64_64K_PAGES #define pud_sect(pud) (0) +#define pud_table(pud) (1) #else #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ PUD_TYPE_SECT) +#define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ + PUD_TYPE_TABLE) #endif static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) -- cgit v1.2.3 From 6917c857e3ab5bc5e15d2b1ff34dc2443ccf5b0d Mon Sep 17 00:00:00 2001 From: Dave P Martin Date: Thu, 29 Jan 2015 16:24:43 +0000 Subject: arm64: Avoid breakage caused by .altmacro in fpsimd save/restore macros Alternate macro mode is not a property of a macro definition, but a gas runtime state that alters the way macros are expanded for ever after (until .noaltmacro is seen). This means that subsequent assembly code that calls other macros can break if fpsimdmacros.h is included. Since these instruction sequences are simple (if dull -- but in a good way), this patch solves the problem by simply expanding the .irp loops. The pre-existing fpsimd_{save,restore} macros weren't rolled with .irp anyway and the sequences affected are short, so this change restores consistency at little cost. 
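To make the transformation concrete: under .altmacro, the removed loop

	.irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
	.irp qb, %(qa + 1)
	stp q\qa, q\qb, [\state, # -16 * \qa - 16]
	.endr
	.endr

expanded, for qa = 30, to stp q30, q31, [\state, #-16 * 30 - 16], and so on down to qa = 0. The patch simply writes those sixteen stp (and the matching ldp) instructions out by hand, so the %() expression evaluation -- and with it the global .altmacro assembler state -- is no longer needed.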
Signed-off-by: Dave Martin Acked-by: Marc Zyngier Acked-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimdmacros.h | 43 ++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 11 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 007618b8188..a2daf129302 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -76,7 +76,6 @@ fpsimd_restore_fpcr x\tmpnr, \state .endm -.altmacro .macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2 mrs x\tmpnr1, fpsr str w\numnr, [\state, #8] @@ -86,11 +85,22 @@ add \state, \state, x\numnr, lsl #4 sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1 br x\tmpnr1 - .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 - .irp qb, %(qa + 1) - stp q\qa, q\qb, [\state, # -16 * \qa - 16] - .endr - .endr + stp q30, q31, [\state, #-16 * 30 - 16] + stp q28, q29, [\state, #-16 * 28 - 16] + stp q26, q27, [\state, #-16 * 26 - 16] + stp q24, q25, [\state, #-16 * 24 - 16] + stp q22, q23, [\state, #-16 * 22 - 16] + stp q20, q21, [\state, #-16 * 20 - 16] + stp q18, q19, [\state, #-16 * 18 - 16] + stp q16, q17, [\state, #-16 * 16 - 16] + stp q14, q15, [\state, #-16 * 14 - 16] + stp q12, q13, [\state, #-16 * 12 - 16] + stp q10, q11, [\state, #-16 * 10 - 16] + stp q8, q9, [\state, #-16 * 8 - 16] + stp q6, q7, [\state, #-16 * 6 - 16] + stp q4, q5, [\state, #-16 * 4 - 16] + stp q2, q3, [\state, #-16 * 2 - 16] + stp q0, q1, [\state, #-16 * 0 - 16] 0: .endm @@ -103,10 +113,21 @@ add \state, \state, x\tmpnr2, lsl #4 sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1 br x\tmpnr1 - .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 - .irp qb, %(qa + 1) - ldp q\qa, q\qb, [\state, # -16 * \qa - 16] - .endr - .endr + ldp q30, q31, [\state, #-16 * 30 - 16] + ldp q28, q29, [\state, #-16 * 28 - 16] + ldp q26, q27, [\state, #-16 * 26 - 16] + ldp q24, q25, [\state, #-16 * 24 - 16] + ldp q22, q23, [\state, #-16 * 22 - 16] + ldp q20, q21, [\state, #-16 * 20 - 16] + ldp q18, q19, [\state, #-16 * 18 - 16] + ldp q16, q17, [\state, #-16 * 16 - 16] + ldp q14, q15, [\state, #-16 * 14 - 16] + ldp q12, q13, [\state, #-16 * 12 - 16] + ldp q10, q11, [\state, #-16 * 10 - 16] + ldp q8, q9, [\state, #-16 * 8 - 16] + ldp q6, q7, [\state, #-16 * 6 - 16] + ldp q4, q5, [\state, #-16 * 4 - 16] + ldp q2, q3, [\state, #-16 * 2 - 16] + ldp q0, q1, [\state, #-16 * 0 - 16] 0: .endm -- cgit v1.2.3 From d476d94f180af3f0fca77394651d4a98f4df1c54 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 2 Feb 2015 16:44:39 +0000 Subject: arm64: compat: Remove incorrect comment in compat_siginfo The comment was right originally but the _pad array size was wrong. It was fixed in the meantime but the comment not updated. Reported-by: Peter Maydell Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/compat.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 3fb053fa6e9..7fbed6919b5 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -161,7 +161,6 @@ typedef struct compat_siginfo { int si_code; union { - /* The padding is the same size as AArch64. */ int _pad[128/sizeof(int) - 3]; /* kill() */ -- cgit v1.2.3