author     Steven Rostedt <srostedt@redhat.com>    2012-10-05 23:16:06 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2012-10-05 23:16:06 -0400
commit     b6c8cfdbc5d5dcaea664ffea9c300c6e5d70ab09 (patch)
tree       14200faac151c696751180742eae30b15385012d /mm
parent     78475c9d785a6d7b3d110e8ebcc9a4d6f1ff473b (diff)
parent     1c7eb28096b50831697a9cf6f8bf1af0e5b234bc (diff)
Merge tag 'v3.4.12' into v3.4-rt
This is the 3.4.12 stable release
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c          8
-rw-r--r--  mm/memory_hotplug.c  16
-rw-r--r--  mm/page_alloc.c       2
-rw-r--r--  mm/vmscan.c           6
4 files changed, 20 insertions, 12 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170c9d54..53cf62b186b6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -766,13 +766,17 @@ void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
bootmem_data_t *bdata;
- unsigned long pfn, goal;
+ unsigned long pfn, goal, limit;
pfn = section_nr_to_pfn(section_nr);
goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
- return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
+ if (goal + size > limit)
+ limit = 0;
+
+ return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
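
The bootmem.c hunk above caps the allocation search at the end of the memory section that contains goal, and falls back to "no limit" (0) when the request cannot fit below that boundary. Below is a minimal userspace sketch of the same arithmetic; PAGE_SHIFT, the section size, and the request in main() are assumed example values, not taken from this tree.

#include <stdio.h>

/* Assumed values for illustration only; real kernels derive these from config. */
#define PAGE_SHIFT        12UL                 /* 4 KiB pages */
#define SECTION_SIZE_BITS 27UL                 /* 128 MiB sections */
#define PAGES_PER_SECTION (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))

static unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec * PAGES_PER_SECTION;
}

int main(void)
{
	unsigned long section_nr = 5, size = 1UL << 20;   /* hypothetical request */
	unsigned long pfn   = section_nr_to_pfn(section_nr);
	unsigned long goal  = pfn << PAGE_SHIFT;
	unsigned long limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	/* Same fallback as the patch: if the request cannot fit before the
	 * section boundary, drop the limit (0 means "no upper bound"). */
	if (goal + size > limit)
		limit = 0;

	printf("goal=%#lx limit=%#lx\n", goal, limit);
	return 0;
}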
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6629fafd6ce4..9ad7d1ef6ac1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
struct mem_section *ms;
struct page *page, *memmap;
- if (!pfn_valid(start_pfn))
- return;
-
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
end_pfn = pfn + pgdat->node_spanned_pages;
/* register_section info */
- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
- register_page_bootmem_info_section(pfn);
-
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ /*
+ * Some platforms can assign the same pfn to multiple nodes - on
+ * node0 as well as nodeN. To avoid registering a pfn against
+ * multiple nodes we check that this pfn does not already
+ * reside in some other node.
+ */
+ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+ register_page_bootmem_info_section(pfn);
+ }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
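
The memory_hotplug.c change moves the pfn_valid() test out of register_page_bootmem_info_section() and into the caller's loop, adding a pfn_to_nid() check so a section spanned by one node but owned by another is skipped, as the new comment explains. A toy model of that filter is sketched below; the ownership map, section size, and the toy_* helper names are invented for illustration and are not kernel APIs.

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL   /* assumed: 128 MiB sections, 4 KiB pages */

/* Toy ownership map: pretend pfns below this boundary belong to node 0 and
 * the rest to node 1, and that every pfn is valid. Real kernels consult the
 * sparsemem section tables instead. */
static int toy_pfn_to_nid(unsigned long pfn)
{
	return pfn < 4 * PAGES_PER_SECTION ? 0 : 1;
}

static int toy_pfn_valid(unsigned long pfn)
{
	(void)pfn;
	return 1;
}

static void register_section(unsigned long pfn, int node)
{
	printf("node %d: registering section starting at pfn %lu\n", node, pfn);
}

int main(void)
{
	int node = 0;
	unsigned long pfn = 0, end_pfn = 6 * PAGES_PER_SECTION; /* node 0 spans 6 sections */

	/* Same shape as the patched loop: skip sections owned by another node. */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		if (toy_pfn_valid(pfn) && toy_pfn_to_nid(pfn) == node)
			register_section(pfn, node);

	return 0;
}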
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4a68c8f20e3e..e097a563af7e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -592,7 +592,7 @@ static inline void __free_one_page(struct page *page,
combined_idx = buddy_idx & page_idx;
higher_page = page + (combined_idx - page_idx);
buddy_idx = __find_buddy_index(combined_idx, order + 1);
- higher_buddy = page + (buddy_idx - combined_idx);
+ higher_buddy = higher_page + (buddy_idx - combined_idx);
if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
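
The page_alloc.c fix changes only the base pointer: buddy_idx - combined_idx is an offset relative to the combined order+1 block, so it must be added to higher_page rather than to page. The standalone sketch below reproduces the index arithmetic with one concrete example; the XOR-based lookup mirrors __find_buddy_index(), and the numbers are arbitrary.

#include <stdio.h>

/* Buddy of the block at page_idx for the given order (XOR on the order bit),
 * mirroring the kernel's __find_buddy_index(). */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned int order = 2;
	unsigned long page_idx         = 12;                                /* freed block  */
	unsigned long buddy_idx        = find_buddy_index(page_idx, order); /* 12 ^ 4 = 8   */
	unsigned long combined_idx     = buddy_idx & page_idx;              /* 8 & 12 = 8   */
	unsigned long higher_buddy_idx = find_buddy_index(combined_idx, order + 1); /* 0    */

	/* higher_page sits at page + (combined_idx - page_idx); the offset
	 * (higher_buddy_idx - combined_idx) is relative to that combined block,
	 * so applying it to the original page, as before the fix, lands on the
	 * wrong struct page whenever combined_idx != page_idx. */
	printf("combined_idx=%lu higher_buddy_idx=%lu offset=%ld\n",
	       combined_idx, higher_buddy_idx,
	       (long)higher_buddy_idx - (long)combined_idx);
	return 0;
}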
diff --git a/mm/vmscan.c b/mm/vmscan.c
index be5bc0af2e76..e989ee22f100 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1983,10 +1983,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
fp /= reclaim_stat->recent_rotated[1] + 1;
spin_unlock_irq(&mz->zone->lru_lock);
@@ -1999,7 +1999,7 @@ out:
unsigned long scan;
scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap) {
+ if (priority || noswap || !vmscan_swappiness(mz, sc)) {
scan >>= priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
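
Both vmscan.c hunks target the swappiness == 0 case: dropping the "+ 1" lets a zero anon_prio zero out ap entirely, and the added !vmscan_swappiness() test routes that case through the scaling branch shown above even at priority 0, so an ap of 0 translates into no anon scanning. The short calculation below plugs hypothetical recent_scanned / recent_rotated counters into the patched formula; all numbers are made up.

#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: swappiness 0 => anon_prio 0, file_prio 200. */
	unsigned long anon_prio = 0, file_prio = 200;
	unsigned long recent_scanned[2] = { 1000, 4000 };  /* [0] = anon, [1] = file */
	unsigned long recent_rotated[2] = {  500,  100 };

	/* Patched form: no "+ 1" on the priorities, so anon_prio == 0 really
	 * produces ap == 0 and anon pages are not scanned at all. */
	unsigned long ap = anon_prio * (recent_scanned[0] + 1) / (recent_rotated[0] + 1);
	unsigned long fp = file_prio * (recent_scanned[1] + 1) / (recent_rotated[1] + 1);

	printf("ap=%lu fp=%lu (anon share = %lu%%)\n",
	       ap, fp, ap + fp ? 100 * ap / (ap + fp) : 0);
	return 0;
}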