author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-03-15 14:54:15 -0700
committer	AKASHI Takahiro <takahiro.akashi@linaro.org>	2017-06-14 10:51:06 +0900
commit		9452a80ce49a0c978be0fa2b143f2596247166fb (patch)
tree		6f3b32c740c27e4486ac018a69613c37b776ed01 /mm/slab.c
parent		3cd7d7814b4bd106097cca9cbc73c399ba872f5b (diff)
mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
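
Every hunk in this patch applies the same transformation: code that was guarded only by the compile-time CONFIG_DEBUG_PAGEALLOC #ifdef is now additionally gated on the runtime debug_pagealloc_enabled() check, so a kernel built with the support no longer pays for the page mapping/unmapping unless the feature was actually switched on at boot (e.g. via the debug_pagealloc=on kernel parameter). A minimal sketch of the resulting pattern, not taken verbatim from any one hunk (`map` stands in for the 0/1 last argument that differs per call site):

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Runtime gate added by this patch: only remap the object's
	 * pages when debug_pagealloc was actually enabled at boot,
	 * not merely compiled in.
	 */
	if (debug_pagealloc_enabled() &&
	    cachep->size % PAGE_SIZE == 0 && OFF_SLAB(cachep))
		kernel_map_pages(virt_to_page(objp),
				 cachep->size / PAGE_SIZE, map);
#endif
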
diff --git a/mm/slab.c b/mm/slab.c
index 24a615d42d74..e9c484921b4c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1847,7 +1847,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if (cachep->size % PAGE_SIZE == 0 &&
+ if (debug_pagealloc_enabled() &&
+ cachep->size % PAGE_SIZE == 0 &&
OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->size / PAGE_SIZE, 1);
@@ -2187,7 +2188,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* to check size >= 256. It guarantees that all necessary small
* sized slab is initialized in current slab initialization sequence.
*/
- if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+ if (debug_pagealloc_enabled() &&
+ !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
size >= 256 && cachep->object_size > cache_line_size() &&
ALIGN(size, cachep->align) < PAGE_SIZE) {
cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
@@ -2243,7 +2245,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* poisoning, then it's going to smash the contents of
* the redzone and userword anyhow, so switch them off.
*/
- if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+ if (debug_pagealloc_enabled() &&
+ size % PAGE_SIZE == 0 && flags & SLAB_POISON)
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
}
@@ -2737,7 +2740,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
set_obj_status(page, objnr, OBJECT_FREE);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+ if (debug_pagealloc_enabled() &&
+ (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, caller);
kernel_map_pages(virt_to_page(objp),
cachep->size / PAGE_SIZE, 0);
@@ -2874,7 +2878,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+ if (debug_pagealloc_enabled() &&
+ (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->size / PAGE_SIZE, 1);
else