From e5a16a5c4602c119262f350274021f90465f479d Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 24 Jun 2022 14:13:05 +0200
Subject: ia64, processor: fix -Wincompatible-pointer-types in ia64_get_irr()

test_bit(), like any other bitmap op, takes `unsigned long *` as its
second argument (pointer to the actual bitmap), as any bitmap itself is
an array of unsigned longs. However, the ia64_get_irr() code passes a
ref to `u64` as the second argument.
This works with the ia64 bitops implementation only because they take
`void *` as the second argument and cast it later on. It also works
with the bitmap API itself only because `unsigned long` has the same
size on ia64 as `u64` (`unsigned long long`); from the compiler's point
of view, however, the two types are different.
Define @irr as `unsigned long` to fix that. This implies no functional
changes. The bug has been hidden for 16 years!

Fixes: a58786917ce2 ("[IA64] avoid broken SAL_CACHE_FLUSH implementations")
Cc: stable@vger.kernel.org # 2.6.16+
Reported-by: kernel test robot
Signed-off-by: Alexander Lobakin
Reviewed-by: Andy Shevchenko
Reviewed-by: Yury Norov
Signed-off-by: Yury Norov
---
 arch/ia64/include/asm/processor.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 7cbce290f4e5..757c2f6d8d4b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
         unsigned int reg = vector / 64;
         unsigned int bit = vector % 64;
-        u64 irr;
+        unsigned long irr;

         switch (reg) {
         case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
--
cgit v1.2.3

From 0e862838f290147ea9c16db852d8d494b552d38d Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 24 Jun 2022 14:13:07 +0200
Subject: bitops: unify non-atomic bitops prototypes across architectures

Currently, the prototypes of the non-atomic bitops are a mess across
the different architectures:

        ret     bool, int, unsigned long
        nr      int, long, unsigned int, unsigned long
        addr    volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but it can make the compiler
emit warnings where none are wanted at all.
Adjust all the prototypes to the following standard:

        ret     bool                            retval can be only 0 or 1
        nr      unsigned long                   native; signed makes no sense
        addr    volatile unsigned long *        bitmaps are arrays of ulongs

Next, some architectures don't define 'arch_' versions as they don't
support instrumentation, while others do. To make sure the same set of
callables is always present, and to ease any potential future changes,
make them all follow the rule:
 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;
and place the non-prefixed definitions into a new file in asm-generic,
to be included by the non-instrumented architectures.
Finally, add some static assertions in order to prevent people from
making a mess in this room again.
I also used the __always_inline attribute consistently, so that the ops
always get resolved to the actual operations.
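As a rough sketch of what the standardized prototype shape looks like in
practice (illustration only, not taken verbatim from the patch; the
"sketch_" helper names are invented, and the real per-architecture
conversions follow in the diffs below), a non-atomic set/test pair built
on the kernel's BIT_MASK()/BIT_WORD() helpers would be:

#include <linux/bits.h>         /* BIT_MASK(), BIT_WORD(), BITS_PER_LONG */
#include <linux/types.h>        /* bool */

/* sketch only: unified 'nr'/'addr'/return types, always inlined */
static __always_inline void
sketch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p |= mask;
}

static __always_inline bool
sketch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}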
Suggested-by: Andy Shevchenko Signed-off-by: Alexander Lobakin Acked-by: Mark Rutland Reviewed-by: Yury Norov Reviewed-by: Andy Shevchenko Signed-off-by: Yury Norov --- arch/alpha/include/asm/bitops.h | 32 ++++++++++---------- arch/hexagon/include/asm/bitops.h | 24 +++++++++------ arch/ia64/include/asm/bitops.h | 42 ++++++++++++++------------- arch/m68k/include/asm/bitops.h | 49 +++++++++++++++++++++---------- arch/s390/include/asm/bitops.h | 61 ++++++++++++++++++++------------------- arch/sh/include/asm/bitops-op32.h | 34 ++++++++++++++-------- arch/x86/include/asm/bitops.h | 22 +++++++------- 7 files changed, 153 insertions(+), 111 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index e1d8483a45f2..492c7713ddae 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr) /* * WARNING: non atomic version. */ -static inline void -__set_bit(unsigned long nr, volatile void * addr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { int *m = ((int *) addr) + (nr >> 5); @@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr) /* * WARNING: non atomic version. */ -static __inline__ void -__clear_bit(unsigned long nr, volatile void * addr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { int *m = ((int *) addr) + (nr >> 5); @@ -94,7 +94,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile void * addr) { smp_mb(); - __clear_bit(nr, addr); + arch___clear_bit(nr, addr); } static inline void @@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr) /* * WARNING: non atomic version. */ -static __inline__ void -__change_bit(unsigned long nr, volatile void * addr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { int *m = ((int *) addr) + (nr >> 5); @@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr) /* * WARNING: non atomic version. */ -static inline int -__test_and_set_bit(unsigned long nr, volatile void * addr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = 1 << (nr & 0x1f); int *m = ((int *) addr) + (nr >> 5); @@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr) /* * WARNING: non atomic version. */ -static inline int -__test_and_clear_bit(unsigned long nr, volatile void * addr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = 1 << (nr & 0x1f); int *m = ((int *) addr) + (nr >> 5); @@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr) /* * WARNING: non atomic version. 
*/ -static __inline__ int -__test_and_change_bit(unsigned long nr, volatile void * addr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = 1 << (nr & 0x1f); int *m = ((int *) addr) + (nr >> 5); @@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr) return (old & mask) != 0; } -static inline int -test_bit(int nr, const volatile void * addr) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; } @@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2]) return __ffs(tmp) + ofs; } +#include + #include #include diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 75d6ba3643b8..da500471ac73 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr) * be atomic, particularly for things like slab_lock and slab_unlock. * */ -static inline void __clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { test_and_clear_bit(nr, addr); } -static inline void __set_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { test_and_set_bit(nr, addr); } -static inline void __change_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { test_and_change_bit(nr, addr); } /* Apparently, at least some of these are allowed to be non-atomic */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { return test_and_clear_bit(nr, addr); } -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { return test_and_set_bit(nr, addr); } -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { return test_and_change_bit(nr, addr); } -static inline int __test_bit(int nr, const volatile unsigned long *addr) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { int retval; @@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr) return retval; } -#define test_bit(nr, addr) __test_bit(nr, addr) - /* * ffz - find first zero in word. * @word: The word to search @@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word) } #include +#include #include #include diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 577be93c0818..9f62af7fd7c4 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr) } /** - * __set_bit - Set a bit in memory + * arch___set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. 
*/ -static __inline__ void -__set_bit (int nr, volatile void *addr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31)); } @@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr) } /** - * __clear_bit - Clears a bit in memory (non-atomic version) + * arch___clear_bit - Clears a bit in memory (non-atomic version) * @nr: the bit to clear * @addr: the address to start counting from * @@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static __inline__ void -__clear_bit (int nr, volatile void *addr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31)); } @@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr) } /** - * __change_bit - Toggle a bit in memory + * arch___change_bit - Toggle a bit in memory * @nr: the bit to toggle * @addr: the address to start counting from * @@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static __inline__ void -__change_bit (int nr, volatile void *addr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31)); } @@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr) #define test_and_set_bit_lock test_and_set_bit /** - * __test_and_set_bit - Set a bit and return its old value + * arch___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * @@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static __inline__ int -__test_and_set_bit (int nr, volatile void *addr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { __u32 *p = (__u32 *) addr + (nr >> 5); __u32 m = 1 << (nr & 31); @@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr) } /** - * __test_and_clear_bit - Clear a bit and return its old value + * arch___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * @@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static __inline__ int -__test_and_clear_bit(int nr, volatile void * addr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { __u32 *p = (__u32 *) addr + (nr >> 5); __u32 m = 1 << (nr & 31); @@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr) } /** - * __test_and_change_bit - Change a bit and return its old value + * arch___test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is non-atomic and can be reordered. 
*/ -static __inline__ int -__test_and_change_bit (int nr, void *addr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { __u32 old, bit = (1 << (nr & 31)); __u32 *m = (__u32 *) addr + (nr >> 5); @@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr) return (old & bit) != 0; } -static __inline__ int -test_bit (int nr, const volatile void *addr) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); } @@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x) #ifdef __KERNEL__ +#include + #include #include diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 51283db53667..71495faf2a90 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr) bfset_mem_set_bit(nr, vaddr)) #endif -#define __set_bit(nr, vaddr) set_bit(nr, vaddr) - +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + set_bit(nr, addr); +} static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr) { @@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr) bfclr_mem_clear_bit(nr, vaddr)) #endif -#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr) - +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + clear_bit(nr, addr); +} static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr) { @@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr) bfchg_mem_change_bit(nr, vaddr)) #endif -#define __change_bit(nr, vaddr) change_bit(nr, vaddr) - - -static inline int test_bit(int nr, const volatile unsigned long *vaddr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { - return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; + change_bit(nr, addr); } +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) +{ + return (addr[nr >> 5] & (1UL << (nr & 31))) != 0; +} static inline int bset_reg_test_and_set_bit(int nr, volatile unsigned long *vaddr) @@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr, bfset_mem_test_and_set_bit(nr, vaddr)) #endif -#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr) - +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + return test_and_set_bit(nr, addr); +} static inline int bclr_reg_test_and_clear_bit(int nr, volatile unsigned long *vaddr) @@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr, bfclr_mem_test_and_clear_bit(nr, vaddr)) #endif -#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr) - +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + return test_and_clear_bit(nr, addr); +} static inline int bchg_reg_test_and_change_bit(int nr, volatile unsigned long *vaddr) @@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr, bfchg_mem_test_and_change_bit(nr, vaddr)) #endif -#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr) - +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + return 
test_and_change_bit(nr, addr); +} /* * The true 68020 and more advanced processors support the "bfffo" @@ -522,6 +540,7 @@ static inline int __fls(int x) #define clear_bit_unlock clear_bit #define __clear_bit_unlock clear_bit_unlock +#include #include #include #include diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 191dc7898b0f..9a7d15da966e 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr, return old & mask; } -static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr |= mask; + *p |= mask; } -static inline void arch___clear_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr &= ~mask; + *p &= ~mask; } -static inline void arch___change_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - *addr ^= mask; + *p ^= mask; } -static inline bool arch___test_and_set_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr |= mask; + old = *p; + *p |= mask; return old & mask; } -static inline bool arch___test_and_clear_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr &= ~mask; + old = *p; + *p &= ~mask; return old & mask; } -static inline bool arch___test_and_change_bit(unsigned long nr, - volatile unsigned long *ptr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned long *addr = __bitops_word(nr, ptr); + unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); unsigned long old; - old = *addr; - *addr ^= mask; + old = *p; + *p ^= mask; return old & mask; } -static inline bool arch_test_bit(unsigned long nr, - const volatile unsigned long *ptr) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { - const volatile unsigned long *addr = __bitops_word(nr, ptr); + const volatile unsigned long *p = __bitops_word(nr, addr); unsigned long mask = __bitops_mask(nr); - return *addr & mask; + return *p & mask; } static inline bool arch_test_and_set_bit_lock(unsigned long nr, diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h index cfe5465acce7..565a85d8b7fb 100644 --- a/arch/sh/include/asm/bitops-op32.h +++ 
b/arch/sh/include/asm/bitops-op32.h @@ -2,6 +2,8 @@ #ifndef __ASM_SH_BITOPS_OP32_H #define __ASM_SH_BITOPS_OP32_H +#include + /* * The bit modifying instructions on SH-2A are only capable of working * with a 3-bit immediate, which signifies the shift position for the bit @@ -16,7 +18,8 @@ #define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) #endif -static inline void __set_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( @@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) } } -static inline void __clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( @@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) } /** - * __change_bit - Toggle a bit in memory + * arch___change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static inline void __change_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( @@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) } /** - * __test_and_set_bit - Set a bit and return its old value + * arch___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * @@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) } /** - * __test_and_clear_bit - Clear a bit and return its old value + * arch___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * @@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) } /* WARNING: non atomic and it can be reordered! 
*/ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr, } /** - * test_bit - Determine whether a bit is set + * arch_test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static inline int test_bit(int nr, const volatile unsigned long *addr) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } +#include + #endif /* __ASM_SH_BITOPS_OP32_H */ diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index a288ecd230ab..973c6bd17f98 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr) } static __always_inline void -arch___set_bit(long nr, volatile unsigned long *addr) +arch___set_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } @@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr) } static __always_inline void -arch___clear_bit(long nr, volatile unsigned long *addr) +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } @@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr) } static __always_inline void -arch___change_bit(long nr, volatile unsigned long *addr) +arch___change_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } @@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr) } static __always_inline bool -arch___test_and_set_bit(long nr, volatile unsigned long *addr) +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; @@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr) * this without also updating arch/x86/kernel/kvm.c */ static __always_inline bool -arch___test_and_clear_bit(long nr, volatile unsigned long *addr) +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; @@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr) } static __always_inline bool -arch___test_and_change_bit(long nr, volatile unsigned long *addr) +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; @@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l return oldbit; } -#define arch_test_bit(nr, addr) \ - (__builtin_constant_p((nr)) \ - ? constant_test_bit((nr), (addr)) \ - : variable_test_bit((nr), (addr))) +static __always_inline bool +arch_test_bit(unsigned long nr, const volatile unsigned long *addr) +{ + return __builtin_constant_p(nr) ? 
constant_test_bit(nr, addr) : + variable_test_bit(nr, addr); +} /** * __ffs - find first set bit in word -- cgit v1.2.3 From e69eb9c460f128b71c6b995d75a05244e4b6cc3e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Fri, 24 Jun 2022 14:13:09 +0200 Subject: bitops: wrap non-atomic bitops with a transparent macro In preparation for altering the non-atomic bitops with a macro, wrap them in a transparent definition. This requires prepending one more '_' to their names in order to be able to do that seamlessly. It is a simple change, given that all the non-prefixed definitions are now in asm-generic. sparc32 already has several triple-underscored functions, so I had to rename them ('___' -> 'sp32_'). Signed-off-by: Alexander Lobakin Reviewed-by: Marco Elver Reviewed-by: Andy Shevchenko Signed-off-by: Yury Norov --- arch/sparc/include/asm/bitops_32.h | 18 +++++++++--------- arch/sparc/lib/atomic32.c | 12 ++++++------ 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h index 889afa9f990f..3448c191b484 100644 --- a/arch/sparc/include/asm/bitops_32.h +++ b/arch/sparc/include/asm/bitops_32.h @@ -19,9 +19,9 @@ #error only can be included directly #endif -unsigned long ___set_bit(unsigned long *addr, unsigned long mask); -unsigned long ___clear_bit(unsigned long *addr, unsigned long mask); -unsigned long ___change_bit(unsigned long *addr, unsigned long mask); +unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask); +unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask); +unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask); /* * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0' @@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << (nr & 31); - return ___set_bit(ADDR, mask) != 0; + return sp32___set_bit(ADDR, mask) != 0; } static inline void set_bit(unsigned long nr, volatile unsigned long *addr) @@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << (nr & 31); - (void) ___set_bit(ADDR, mask); + (void) sp32___set_bit(ADDR, mask); } static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) @@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << (nr & 31); - return ___clear_bit(ADDR, mask) != 0; + return sp32___clear_bit(ADDR, mask) != 0; } static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) @@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << (nr & 31); - (void) ___clear_bit(ADDR, mask); + (void) sp32___clear_bit(ADDR, mask); } static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) @@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long * ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << (nr & 31); - return ___change_bit(ADDR, mask) != 0; + return sp32___change_bit(ADDR, mask) != 0; } static inline void change_bit(unsigned long nr, volatile unsigned long *addr) @@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) ADDR = ((unsigned long *) addr) + (nr >> 5); mask = 1 << 
(nr & 31);
-       (void) ___change_bit(ADDR, mask);
+       (void) sp32___change_bit(ADDR, mask);
 }

 #include
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 8b81d0f00c97..cf80d1ae352b 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
 }
 EXPORT_SYMBOL(arch_atomic_set);

-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;

@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
         return old & mask;
 }
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);

-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;

@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
         return old & mask;
 }
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);

-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 {
         unsigned long old, flags;

@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
         return old & mask;
 }
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);

 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 {
--
cgit v1.2.3
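The sparc32 rename above is needed because the generic non-atomic bitops
are about to claim the triple-underscored names for themselves. As a
rough sketch of the wrapping scheme this commit prepares for (illustration
only; the exact macro the series introduces may differ), the short name
becomes a transparent macro over the underscored inline:

/* illustration only -- not the exact macro from the series */
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        arch___set_bit(nr, addr);       /* instrumentation hooks could sit here */
}

/* callers keep writing __set_bit(); the macro forwards transparently */
#define __set_bit(nr, addr)     ___set_bit(nr, addr)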
From 0b4736a424a1358d613057a24ff97813305513e2 Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Fri, 1 Jul 2022 05:54:23 -0700
Subject: arm: align find_bit declarations with generic kernel

ARM has its own implementation of the find_bit() functions, and their
declarations differ from those in the generic headers. Fix it.

Signed-off-by: Yury Norov
---
 arch/arm/include/asm/bitops.h | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 8e94fe7ab5eb..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);

 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);

 #ifndef CONFIG_SMP
 /*
--
cgit v1.2.3

From 3a2ba42cbd0b669ce3837ba400905f93dd06c79f Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 15 Jul 2022 17:15:36 +0200
Subject: x86/olpc: fix 'logical not is only applied to the left hand side'

The bitops compile-time optimization series revealed one more problem
in olpc-xo1-sci.c:send_ebook_state(), resulting in GCC warnings:

arch/x86/platform/olpc/olpc-xo1-sci.c: In function 'send_ebook_state':
arch/x86/platform/olpc/olpc-xo1-sci.c:83:63: warning: logical not is only applied to the left hand side of comparison [-Wlogical-not-parentheses]
   83 |         if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
      |                                                                ^~
arch/x86/platform/olpc/olpc-xo1-sci.c:83:13: note: add parentheses around left hand side expression to silence this warning

Despite the code working as intended, the redundant double negation of
a boolean value, combined with a comparison against a `char` that is
never explicitly converted to bool, makes compilers suspect an
unintentional logical mistake here.
Make it the other way around and negate the char instead to silence
the warnings.

Fixes: d2aa37411b8e ("x86/olpc/xo1/sci: Produce wakeup events for buttons and switches")
Cc: stable@vger.kernel.org # 3.5+
Reported-by: Guenter Roeck
Reported-by: kernel test robot
Reviewed-and-tested-by: Guenter Roeck
Signed-off-by: Alexander Lobakin
Signed-off-by: Yury Norov
---
 arch/x86/platform/olpc/olpc-xo1-sci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index f03a6883dcc6..89f25af4b3c3 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
                 return;
         }

-        if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+        if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
                 return; /* Nothing new to report. */

         input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
--
cgit v1.2.3
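To see what the warning is about, here is a small userspace toy (not the
kernel code; my_test_bit() and the values are made up for the example):
with a plain char holding a value other than 0 or 1, `!!bit == state` and
`bit == !!state` give different answers, which is exactly the ambiguity
-Wlogical-not-parentheses flags. The patch keeps the comparison's intent
but makes both sides explicitly boolean, which is what the compiler asks
for.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for test_bit(): returns strictly 0 or 1 */
static bool my_test_bit(unsigned long nr, const unsigned long *addr)
{
        return 1UL & (addr[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long))));
}

int main(void)
{
        unsigned long sw[1] = { 1UL << 3 };     /* bit 3 ("tablet mode") set */
        char state = 2;                         /* logically true, but not 1 */

        /* old form: '!!' applies to the left-hand side only -> 1 == 2 -> 0 */
        printf("%d\n", !!my_test_bit(3, sw) == state);
        /* new form: both sides normalised to 0/1 -> 1 == 1 -> 1 */
        printf("%d\n", my_test_bit(3, sw) == !!state);

        return 0;
}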
From 3e731203153de1c06a8b7a4f15061e9051c09a6f Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Sat, 23 Jul 2022 14:45:36 -0700
Subject: powerpc: drop dependency on <asm/machdep.h> in archrandom.h

archrandom.h includes <asm/machdep.h> to refer to ppc_md. This causes a
circular header dependency if the generic nodemask.h includes random.h:

In file included from include/linux/cred.h:16,
                 from include/linux/seq_file.h:13,
                 from arch/powerpc/include/asm/machdep.h:6,
                 from arch/powerpc/include/asm/archrandom.h:5,
                 from include/linux/random.h:109,
                 from include/linux/nodemask.h:97,
                 from include/linux/list_lru.h:12,
                 from include/linux/fs.h:13,
                 from include/linux/compat.h:17,
                 from arch/powerpc/kernel/asm-offsets.c:12:
include/linux/sched.h:1203:9: error: unknown type name 'nodemask_t'
 1203 |         nodemask_t mems_allowed;
      |         ^~~~~~~~~~

Fix it by removing the dependency from archrandom.h.
Now that arch_get_random_seed_long() has moved to a C file and is not
exported, it is not available to modules. As Michael Ellerman says:

  I think we actually don't need it exported to modules, I think it's a
  private detail of the RNG <-> architecture interface, not something
  that modules should be calling.

CC: Andy Shevchenko
CC: Benjamin Herrenschmidt
CC: Michael Ellerman
CC: Paul Mackerras
CC: Rasmus Villemoes
CC: Stephen Rothwell
CC: linuxppc-dev@lists.ozlabs.org
Suggested-by: Michael Ellerman (for non-exporting)
Acked-by: Michael Ellerman
Signed-off-by: Yury Norov
---
 arch/powerpc/include/asm/archrandom.h |  9 +--------
 arch/powerpc/kernel/setup-common.c    | 12 ++++++++++++
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 9a53e29680f4..21def59ef1a6 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -4,7 +4,7 @@

 #ifdef CONFIG_ARCH_RANDOM

-#include <asm/machdep.h>
+bool __must_check arch_get_random_seed_long(unsigned long *v);

 static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
@@ -16,13 +16,6 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
         return false;
 }

-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
-        if (ppc_md.get_random_seed)
-                return ppc_md.get_random_seed(v);
-
-        return false;
-}

 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index eb0077b302e2..5175726c703d 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -171,6 +171,18 @@ EXPORT_SYMBOL_GPL(machine_power_off);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);

+#ifdef CONFIG_ARCH_RANDOM
+bool __must_check arch_get_random_seed_long(unsigned long *v)
+{
+        if (ppc_md.get_random_seed)
+                return ppc_md.get_random_seed(v);
+
+        return false;
+}
+EXPORT_SYMBOL(arch_get_random_seed_long);
+
+#endif
+
 void machine_halt(void)
 {
         machine_shutdown();
--
cgit v1.2.3
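For context on the technique (an editor's sketch, not code from the
patch): the fix follows the usual pattern for breaking header cycles. A
static inline that forces a heavy include is turned into a plain
declaration, and its definition moves to a C file that can afford the
include. All names below are invented for the illustration; heavy.h
stands in for asm/machdep.h, light.h for archrandom.h, light.c for
setup-common.c.

/* heavy.h -- the header the cycle ran through */
#include <stdbool.h>
struct machine_ops {
        bool (*get_seed)(unsigned long *v);
};
extern struct machine_ops mach_ops;

/* light.h -- declaration only, no longer needs heavy.h */
#include <stdbool.h>
bool light_get_seed(unsigned long *v);

/* light.c -- the one translation unit that still needs heavy.h */
#include "heavy.h"
#include "light.h"

bool light_get_seed(unsigned long *v)
{
        if (mach_ops.get_seed)
                return mach_ops.get_seed(v);

        return false;
}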