author    Steven Rostedt <srostedt@redhat.com>  2012-06-12 08:50:15 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2012-06-12 08:50:15 -0400
commit    4431f95bfe5fe0d7909599c0e50d649f2ef6b9e1 (patch)
tree      d1bebccd5dd85a30b90e298c70d77f31341a12f9 /mm
parent    9ff5baa988e758044a62a05b1f668b5ff556517c (diff)
parent    7313dd1519ea5cd81f67bc26b180e472d102ff88 (diff)
Merge tag 'v3.2.20' into v3.2-rt
This is the 3.2.20 stable release
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  29
-rw-r--r--  mm/slub.c      9
-rw-r--r--  mm/vmalloc.c  11
-rw-r--r--  mm/vmscan.c    2
4 files changed, 36 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7120c2e2cf82..5f5c545cdf06 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2068,6 +2068,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
kref_get(&reservations->refs);
}
+static void resv_map_put(struct vm_area_struct *vma)
+{
+ struct resv_map *reservations = vma_resv_map(vma);
+
+ if (!reservations)
+ return;
+ kref_put(&reservations->refs, resv_map_release);
+}
+
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
@@ -2083,7 +2092,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) -
region_count(&reservations->regions, start, end);
- kref_put(&reservations->refs, resv_map_release);
+ resv_map_put(vma);
if (reserve) {
hugetlb_acct_memory(h, -reserve);
@@ -2884,12 +2893,16 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0)
- return chg;
+ if (chg < 0) {
+ ret = chg;
+ goto out_err;
+ }
/* There must be enough filesystem quota for the mapping */
- if (hugetlb_get_quota(inode->i_mapping, chg))
- return -ENOSPC;
+ if (hugetlb_get_quota(inode->i_mapping, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
/*
* Check enough hugepages are available for the reservation.
@@ -2898,7 +2911,7 @@ int hugetlb_reserve_pages(struct inode *inode,
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugetlb_put_quota(inode->i_mapping, chg);
- return ret;
+ goto out_err;
}
/*
@@ -2915,6 +2928,10 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
+out_err:
+ if (vma)
+ resv_map_put(vma);
+ return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
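The hugetlb change above plugs a reference leak: hugetlb_reserve_pages() takes a reference on the VMA's resv_map in the HPAGE_RESV_OWNER path, but the early returns on the region_chg() and quota failures skipped the matching kref_put(), leaking the map. The patch adds a resv_map_put() helper and funnels every failure through a single out_err label. A minimal userspace sketch of that goto-cleanup pattern, with illustrative names and a plain counter standing in for the kernel's kref (an assumption, not the kernel API):

#include <stdlib.h>

struct resv_map { int refs; };               /* stand-in for the kref */

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *m = malloc(sizeof(*m));
	if (m)
		m->refs = 1;
	return m;
}

static void resv_map_put(struct resv_map *m) /* one helper, one drop site */
{
	if (m && --m->refs == 0)
		free(m);
}

static int reserve_pages(long from, long to)
{
	struct resv_map *map = resv_map_alloc();
	int ret;

	if (!map)
		return -1;
	if (to < from) {                     /* failure after the ref is held */
		ret = -1;
		goto out_err;                /* must drop it, not just return */
	}
	/* on success the mapping keeps the reference; it is dropped much
	 * later, the way hugetlb_vm_op_close() now calls resv_map_put() */
	return 0;
out_err:
	resv_map_put(map);                   /* the single cleanup point */
	return ret;
}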
diff --git a/mm/slub.c b/mm/slub.c
index a99c785828c6..af47188da4d3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1506,15 +1506,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
freelist = page->freelist;
counters = page->counters;
new.counters = counters;
- if (mode)
+ if (mode) {
new.inuse = page->objects;
+ new.freelist = NULL;
+ } else {
+ new.freelist = freelist;
+ }
VM_BUG_ON(new.frozen);
new.frozen = 1;
} while (!__cmpxchg_double_slab(s, page,
freelist, counters,
- NULL, new.counters,
+ new.freelist, new.counters,
"lock and freeze"));
remove_partial(n, page);
@@ -1556,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
object = t;
available = page->objects - page->inuse;
} else {
- page->freelist = t;
available = put_cpu_partial(s, page, 0);
}
if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
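The slub hunks fix a race in acquire_slab(): the cmpxchg unconditionally installed NULL as the new freelist even when the slab was only being taken for the per-cpu partial list, and get_partial_node() then patched page->freelist back with a plain store (page->freelist = t) outside the protection of the frozen state. After the fix, the new freelist value is decided before the cmpxchg (NULL only when the caller claims every object) and the unlocked store is removed. A userspace sketch of the retry pattern in play, using C11 atomics on a two-field state with illustrative names (an analogy to __cmpxchg_double_slab(), not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct slab_state {
	void *freelist;                 /* first free object, or NULL */
	unsigned inuse;                 /* allocated-object count     */
	bool frozen;                    /* claimed by a CPU slab      */
};

static _Atomic struct slab_state slab;  /* page->{freelist,counters} stand-in */

/* Freeze the slab; take_all says whether the caller claims every object. */
static void acquire(bool take_all, unsigned total_objects)
{
	struct slab_state old, new;

	do {
		old = atomic_load(&slab);
		new = old;
		if (take_all) {
			new.inuse = total_objects;
			new.freelist = NULL;         /* caller owns the list  */
		} else {
			new.freelist = old.freelist; /* keep it: the fix here */
		}
		new.frozen = true;
	} while (!atomic_compare_exchange_weak(&slab, &old, new));
}

int main(void)
{
	int obj;

	atomic_store(&slab, (struct slab_state){ .freelist = &obj });
	acquire(false, 16);   /* partial-list case: the freelist survives */
	printf("freelist kept: %d\n", atomic_load(&slab).freelist == &obj);
	return 0;
}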
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 071155a6f83b..66df815f95a0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,7 +256,7 @@ struct vmap_area {
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
struct list_head purge_list; /* "lazy purge" list */
- void *private;
+ struct vm_struct *vm;
struct rcu_head rcu_head;
};
@@ -1162,9 +1162,10 @@ void __init vmalloc_init(void)
/* Import existing vmlist entries. */
for (tmp = vmlist; tmp; tmp = tmp->next) {
va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
- va->flags = tmp->flags | VM_VM_AREA;
+ va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
+ va->vm = tmp;
__insert_vmap_area(va);
}
@@ -1262,7 +1263,7 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
vm->addr = (void *)va->va_start;
vm->size = va->va_end - va->va_start;
vm->caller = caller;
- va->private = vm;
+ va->vm = vm;
va->flags |= VM_VM_AREA;
}
@@ -1385,7 +1386,7 @@ static struct vm_struct *find_vm_area(const void *addr)
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA)
- return va->private;
+ return va->vm;
return NULL;
}
@@ -1404,7 +1405,7 @@ struct vm_struct *remove_vm_area(const void *addr)
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
- struct vm_struct *vm = va->private;
+ struct vm_struct *vm = va->vm;
if (!(vm->flags & VM_UNLIST)) {
struct vm_struct *tmp, **p;
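The vmalloc hunks replace the untyped void *private back-pointer in struct vmap_area with a typed struct vm_struct *vm, and fix vmalloc_init(): boot-time vmlist entries previously copied tmp->flags into va->flags, where vm_struct flag bits do not apply, and never set the back-pointer at all, so find_vm_area() could not resolve those entries. A compile-only sketch of the typed-pointer half, with reduced hypothetical types (not the kernel definitions):

struct vm_struct {                  /* reduced: the mapped-area descriptor */
	void *addr;
	unsigned long size;
};

struct vmap_area {                  /* reduced: the allocator's rbtree node */
	unsigned long va_start, va_end;
	unsigned long flags;
	struct vm_struct *vm;       /* typed: was `void *private` */
};

/* With the typed field the lookup needs no cast, and assigning anything
 * other than a struct vm_struct * becomes a compile-time error. */
static struct vm_struct *area_of(struct vmap_area *va)
{
	return va->vm;
}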
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 39f9629346a1..aa50ccf46a62 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -697,7 +697,7 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM;
if (referenced_ptes) {
- if (PageAnon(page))
+ if (PageSwapBacked(page))
return PAGEREF_ACTIVATE;
/*
* All mapped pages start out with page table
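The one-line vmscan change widens the re-activation test in page_check_references(): PageAnon() is true only for anonymous pages, so a referenced shmem/tmpfs page, which sits on the same swap-backed LRU, fell through to the file-page handling below and could be reclaimed despite its swap-in cost. PageSwapBacked() covers anonymous and shmem/tmpfs pages alike. A userspace sketch of the decision, with reduced hypothetical types (the real function takes struct page and struct scan_control and has further branches):

#include <stdbool.h>

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_ACTIVATE,
	PAGEREF_KEEP,
};

struct page_info {
	int referenced_ptes;    /* page_referenced() result              */
	bool anon;              /* PageAnon(): anonymous memory only     */
	bool swap_backed;       /* PageSwapBacked(): anon plus shmem     */
};

static enum page_references check_references(const struct page_info *p)
{
	if (!p->referenced_ptes)
		return PAGEREF_RECLAIM;
	if (p->swap_backed)     /* was p->anon: missed shmem/tmpfs pages */
		return PAGEREF_ACTIVATE;
	return PAGEREF_KEEP;    /* file pages get second-chance handling */
}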