author    Alex Shi <alex.shi@linaro.org>  2018-02-01 12:02:34 +0800
committer Alex Shi <alex.shi@linaro.org>  2018-02-01 12:02:34 +0800
commit    a40f2a595adfe0be6ced06fdb4c4a24ae3291a91
tree      4e99ec8bd3ce134bdbafd54a0fc873f11c33e21b /mm
parent    293c379504006b5ea8b4e7109ba4ebc3211f9b91
parent    49fe90b853dfb1087d0a734cd7f4af1aa00c8e53
Merge tag 'v4.4.114' into linux-linaro-lsk-v4.4 (tag: lsk-v4.4-18.02)
This is the 4.4.114 stable release
Diffstat (limited to 'mm')
 mm/cma.c            | 15
 mm/memcontrol.c     |  2
 mm/memory-failure.c |  7
 mm/mmap.c           |  6
 mm/page_alloc.c     |  6
 5 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index bd0e1412475e..43f4a122e969 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
}
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
- int align_order)
+ unsigned int align_order)
{
if (align_order <= cma->order_per_bit)
return 0;
@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
}
/*
- * Find a PFN aligned to the specified order and return an offset represented in
- * order_per_bits.
+ * Find the offset of the base PFN from the specified align_order.
+ * The value returned is represented in order_per_bits.
*/
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
- int align_order)
+ unsigned int align_order)
{
- if (align_order <= cma->order_per_bit)
- return 0;
-
- return (ALIGN(cma->base_pfn, (1UL << align_order))
- - cma->base_pfn) >> cma->order_per_bit;
+ return (cma->base_pfn & ((1UL << align_order) - 1))
+ >> cma->order_per_bit;
}
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
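Note on the cma_bitmap_aligned_offset() change above: the old code computed
the distance from base_pfn up to the *next* align_order boundary, but what
the allocation bitmap needs is base_pfn's offset *within* an aligned block,
so the two formulas disagree whenever base_pfn is not already aligned. A
minimal userspace sketch contrasting them (the sample PFN and orders are
made up for illustration, not taken from the patch):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical CMA area whose base is not aligned to 1 << 9. */
	unsigned long base_pfn = 0x2f880;
	unsigned int order_per_bit = 0;
	unsigned int align_order = 9;

	/* Old formula: distance UP to the next aligned PFN. */
	unsigned long old = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
				>> order_per_bit;

	/* New formula: base_pfn's offset WITHIN its aligned block. */
	unsigned long new = (base_pfn & ((1UL << align_order) - 1))
				>> order_per_bit;

	/* Prints old=384 new=128; the two agree only when base_pfn is
	 * already aligned (both are then 0). */
	printf("old=%lu new=%lu\n", old, new);
	return 0;
}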
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e25b93a4267d..55a9facb8e8d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list)
next = page->lru.next;
VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON_PAGE(page_count(page), page);
+ VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
if (!page->mem_cgroup)
continue;
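The memcontrol.c hunk relaxes a sanity check rather than changing behaviour:
a hwpoisoned page keeps a reference for as long as the poison is tracked, so
its refcount never reaches zero and the old VM_BUG_ON would fire when such a
page was uncharged. A toy model of the relaxed invariant (plain C with
stand-in types, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in page: only the fields the check cares about. */
struct page {
	int refcount;
	bool hwpoison;
	void *mem_cgroup;
};

static void uncharge_one(struct page *page)
{
	/* Old invariant: refcount must already be 0 here.
	 * New invariant: a poisoned page may keep its lingering
	 * reference and still be uncharged. */
	assert(page->hwpoison || page->refcount == 0);
	page->mem_cgroup = NULL;	/* drop the memcg charge */
}

int main(void)
{
	struct page poisoned = {
		.refcount = 1, .hwpoison = true,
		.mem_cgroup = (void *)&poisoned,
	};
	uncharge_one(&poisoned);	/* no longer trips the assertion */
	return 0;
}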
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 091fe9b06663..92a647957f91 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p)
*/
ClearPageActive(p);
ClearPageUnevictable(p);
+
+ /*
+ * Poisoned page might never drop its ref count to 0 so we have
+ * to uncharge it manually from its memcg.
+ */
+ mem_cgroup_uncharge(p);
+
/*
* drop the page count elevated by isolate_lru_page()
*/
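The memory-failure.c hunk is the other half of the same fix: memory_failure()
holds one reference on the poisoned page indefinitely, so the final put_page()
that would normally uncharge the page from its memcg never happens, and the
charge would leak. A toy lifecycle showing why the explicit uncharge is needed
(plain C sketch; the refcounts are invented for illustration):

#include <stdio.h>

struct page {
	int refcount;
	void *mem_cgroup;
};

/* Normal path: the memcg charge is dropped at the final put. */
static void put_page(struct page *p)
{
	if (--p->refcount == 0)
		p->mem_cgroup = NULL;
}

int main(void)
{
	/* Two references: one held by hwpoison handling (kept forever),
	 * one taken by isolate_lru_page(). */
	struct page p = { .refcount = 2, .mem_cgroup = (void *)&p };

	p.mem_cgroup = NULL;	/* models the new mem_cgroup_uncharge() */
	put_page(&p);		/* drop the isolate_lru_page() reference */

	/* refcount stays at 1 forever, so without the explicit uncharge
	 * the memcg charge would never have been released. */
	printf("refcount=%d memcg=%p\n", p.refcount, p.mem_cgroup);
	return 0;
}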
diff --git a/mm/mmap.c b/mm/mmap.c
index eaa460ddcaf9..cc84b97ca250 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2188,7 +2188,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
gap_addr = TASK_SIZE;
next = vma->vm_next;
- if (next && next->vm_start < gap_addr) {
+ if (next && next->vm_start < gap_addr &&
+ (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
@@ -2273,7 +2274,8 @@ int expand_downwards(struct vm_area_struct *vma,
if (gap_addr > address)
return -ENOMEM;
prev = vma->vm_prev;
- if (prev && prev->vm_end > gap_addr) {
+ if (prev && prev->vm_end > gap_addr &&
+ (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (!(prev->vm_flags & VM_GROWSDOWN))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
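The two mmap.c hunks tighten the stack guard-gap check: only an accessible
neighbour (one with at least one of VM_READ, VM_WRITE, VM_EXEC set) should
block stack expansion, while a PROT_NONE hole deliberately mapped next to the
stack carries none of those bits and is now ignored. A standalone sketch of
the predicate (the VM_* values match the kernel's; the function name is made
up):

#include <stdbool.h>
#include <stdio.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL

/* True if a neighbouring mapping counts as an obstacle when enforcing
 * the stack guard gap. */
static bool blocks_stack_gap(unsigned long vm_flags)
{
	return (vm_flags & (VM_WRITE | VM_READ | VM_EXEC)) != 0;
}

int main(void)
{
	printf("PROT_NONE hole:   %d\n", blocks_stack_gap(0));
	printf("readable mapping: %d\n", blocks_stack_gap(VM_READ));
	return 0;
}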
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 417700241e52..ca6db068319c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2461,9 +2461,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
if (!area->nr_free)
continue;
- if (alloc_harder)
- return true;
-
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
if (!list_empty(&area->free_list[mt]))
return true;
@@ -2475,6 +2472,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
return true;
}
#endif
+ if (alloc_harder &&
+ !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+ return true;
}
return false;
}
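The page_alloc.c change reorders __zone_watermark_ok(): previously an
ALLOC_HARDER request returned true as soon as area->nr_free was non-zero,
even when every free page of that order was MIGRATE_CMA and therefore
unusable for the request; now the harder-allocation escape hatch only fires
when the MIGRATE_HIGHATOMIC free list actually has pages. A toy model of the
fixed check (simplified structures, not the kernel's; the migratetype
ordering mirrors the kernel's enum):

#include <stdbool.h>
#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the "normal" types end here */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
	MIGRATE_CMA,
	MIGRATE_TYPES
};

struct free_area {
	unsigned long nr_free;
	unsigned long free[MIGRATE_TYPES];	/* stand-in for free_list */
};

/* After the fix: a harder allocation may only tap the highatomic
 * reserve when that list really has pages; nr_free alone can be
 * inflated by CMA pages the request cannot use. */
static bool order_ok(const struct free_area *area, bool alloc_harder)
{
	int mt;

	if (!area->nr_free)
		return false;

	for (mt = 0; mt < MIGRATE_PCPTYPES; mt++)
		if (area->free[mt])
			return true;

	if (alloc_harder && area->free[MIGRATE_HIGHATOMIC])
		return true;

	return false;
}

int main(void)
{
	/* All free pages are CMA: old code said yes, fixed code says no. */
	struct free_area area = { .nr_free = 4 };
	area.free[MIGRATE_CMA] = 4;
	printf("harder alloc ok: %d\n", order_ok(&area, true));
	return 0;
}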