author		Chris Metcalf <cmetcalf@tilera.com>	2011-06-02 10:19:41 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-06-03 19:33:49 +0300
commit		d4d84fef6d0366b585b7de13527a0faeca84d9ce (patch)
tree		c67449976f955cff4a2fe8a74affc2dd67c37b7e /mm/slub.c
parent		55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
slub: always align cpu_slab to honor cmpxchg_double requirement
On an architecture without CMPXCHG_LOCAL but with DEBUG_VM enabled, the
VM_BUG_ON() in __pcpu_double_call_return_bool() will cause an early panic
during boot unless we always align cpu_slab properly.

In principle we could remove the alignment-testing VM_BUG_ON() for
architectures that don't have CMPXCHG_LOCAL, but leaving it in means that
new code will tend not to break x86 even if it is introduced on another
platform, and it's a low cost to require the alignment.

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
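The alignment requirement can be illustrated with a small userspace sketch (not part of the patch): the double cmpxchg updates two adjacent pointer-sized words as a single unit, so the pair must start on a 2 * sizeof(void *) boundary, which is the condition the VM_BUG_ON() in __pcpu_double_call_return_bool() asserts. The struct and helper names below are hypothetical stand-ins, and aligned_alloc() merely plays the role of __alloc_percpu() with an explicit alignment argument.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the pointer-sized pair in struct kmem_cache_cpu
 * that the double cmpxchg updates as one unit. */
struct cpu_slab_pair {
	void *freelist;
	unsigned long tid;
};

/* Rough userspace analogue of the alignment test that the VM_BUG_ON() in
 * __pcpu_double_call_return_bool() performs. */
static int double_word_aligned(const void *p)
{
	return ((uintptr_t)p % (2 * sizeof(void *))) == 0;
}

int main(void)
{
	/* aligned_alloc() stands in for __alloc_percpu() with an explicit
	 * alignment; plain malloc() only guarantees max_align_t alignment,
	 * which on some ABIs is weaker than 2 * sizeof(void *). */
	struct cpu_slab_pair *p = aligned_alloc(2 * sizeof(void *), sizeof(*p));

	if (!p)
		return 1;
	printf("double-word aligned: %d\n", double_word_aligned(p));
	free(p);
	return 0;
}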
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	12
1 files changed, 4 insertions, 8 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 7be0223531b..35f351f2619 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
-	 * Must align to double word boundary for the double cmpxchg instructions
-	 * to work.
+	 * Must align to double word boundary for the double cmpxchg
+	 * instructions to work; see __pcpu_double_call_return_bool().
 	 */
-	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-	/* Regular alignment is sufficient */
-	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+				     2 * sizeof(void *));
 
 	if (!s->cpu_slab)
 		return 0;