author     Michal Nazarewicz <mina86@xxxxxxxxxx>    2011-12-20 08:54:30 +0800
committer  Andy Green <andy.green@linaro.org>       2011-12-26 15:09:47 +0800
commit     e7146e6ca3a0fd7fcc8b52ca212cd80e16b3b4da
tree       690d80900de3be7a9f8dd5b8caf87cd462098ecc
parent     ed96647fdc582a104c2a370c6c1bd313503f9ac8
The MIGRATE_CMA migration type has two main characteristics:
(i) only movable pages can be allocated from MIGRATE_CMA pageblocks, and
(ii) the page allocator will never change the migration type of
MIGRATE_CMA pageblocks.

This guarantees (to some degree) that a page in a MIGRATE_CMA pageblock
can always be migrated somewhere else (unless there's no memory left in
the system).

MIGRATE_CMA is designed to be used for allocating big chunks (e.g. 10 MiB)
of physically contiguous memory.  Once a driver requests contiguous
memory, pages from MIGRATE_CMA pageblocks may be migrated away to create
a contiguous block.

To minimise the number of migrations, the MIGRATE_CMA migration type is
the last type tried when the page allocator falls back to migration types
other than the requested one.

Signed-off-by: Michal Nazarewicz <mina86@xxxxxxxxxx>
[m.szyprowski: removed CONFIG_CMA_MIGRATE_TYPE]
Signed-off-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Signed-off-by: Kyungmin Park <kyungmin.park@xxxxxxxxxxx>
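As an aside (not part of the patch itself): the fallback order this change sets
up can be illustrated with a small stand-alone user-space C sketch.  The enum
layout below is a simplified assumption made only for the example (MIGRATE_CMA's
exact position in the real migratetype enum is defined elsewhere in the CMA
series); the table mirrors the fallbacks[] array added to page_alloc.c, and the
walk mimics the loop in __rmqueue_fallback() after this change.

#include <stdio.h>

/*
 * Simplified migratetype enum, for illustration only; the real layout
 * lives in include/linux/mmzone.h and is assumed here.
 */
enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	MIGRATE_CMA,
	MIGRATE_TYPES
};

static const char *const name[MIGRATE_TYPES] = {
	[MIGRATE_UNMOVABLE]   = "UNMOVABLE",
	[MIGRATE_RECLAIMABLE] = "RECLAIMABLE",
	[MIGRATE_MOVABLE]     = "MOVABLE",
	[MIGRATE_RESERVE]     = "RESERVE",
	[MIGRATE_CMA]         = "CMA",
};

/* Same shape and order as the fallbacks[] table added by this patch. */
static const int fallbacks[MIGRATE_PCPTYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_CMA, MIGRATE_RESERVE },
};

int main(void)
{
	int start, i;

	for (start = 0; start < MIGRATE_PCPTYPES; start++) {
		printf("%-11s falls back to:", name[start]);
		for (i = 0; i < 4; i++) {
			int mt = fallbacks[start][i];

			/* MIGRATE_RESERVE is handled separately; stop here. */
			if (mt == MIGRATE_RESERVE)
				break;
			printf(" %s", name[mt]);
		}
		printf("\n");
	}
	return 0;
}

Compiled with a plain cc and run, the sketch shows that only MIGRATE_MOVABLE
requests ever reach MIGRATE_CMA, and only after MIGRATE_RECLAIMABLE and
MIGRATE_UNMOVABLE have been tried, which is the "minimise the number of
migrations" property described above.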
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig      |  2
-rw-r--r--  mm/compaction.c | 11
-rw-r--r--  mm/page_alloc.c | 68
3 files changed, 61 insertions(+), 20 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 011b110365c..e080cacd769 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -192,7 +192,7 @@ config COMPACTION
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
diff --git a/mm/compaction.c b/mm/compaction.c
index e71ceaf634b..3e073412e0f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -19,6 +19,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
+static inline bool is_migrate_cma_or_movable(int migratetype)
+{
+ return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
unsigned long release_freepages(struct list_head *freelist)
{
struct page *page, *next;
@@ -114,8 +119,8 @@ static bool suitable_migration_target(struct page *page)
if (PageBuddy(page) && page_order(page) >= pageblock_order)
return true;
- /* If the block is MIGRATE_MOVABLE, allow migration */
- if (migratetype == MIGRATE_MOVABLE)
+ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+ if (is_migrate_cma_or_movable(migratetype))
return true;
/* Otherwise skip the block */
@@ -324,7 +329,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
*/
pageblock_nr = low_pfn >> pageblock_order;
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
- get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+ !is_migrate_cma_or_movable(get_pageblock_migratetype(page))) {
low_pfn += pageblock_nr_pages;
low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
last_pageblock_nr = pageblock_nr;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dab9eaddfdb..0e939133017 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -733,6 +733,26 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
}
}
+#ifdef CONFIG_CMA
+/*
+ * Free whole pageblock and set its migration type to MIGRATE_CMA.
+ */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+ unsigned i = pageblock_nr_pages;
+ struct page *p = page;
+
+ do {
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+ } while (++p, --i);
+
+ set_page_refcounted(page);
+ set_pageblock_migratetype(page, MIGRATE_CMA);
+ __free_pages(page, pageblock_order);
+ totalram_pages += pageblock_nr_pages;
+}
+#endif
/*
* The order of subdivision here is critical for the IO subsystem.
@@ -841,11 +861,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
* This array describes the order lists are fallen back to when
* the free lists for the desirable migrate type are depleted
*/
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
+static int fallbacks[MIGRATE_PCPTYPES][4] = {
[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_CMA, MIGRATE_RESERVE },
};
/*
@@ -940,12 +959,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
--current_order) {
- for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+ for (i = 0; i < ARRAY_SIZE(fallbacks[0]); i++) {
migratetype = fallbacks[start_migratetype][i];
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
- continue;
+ break;
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
@@ -960,11 +979,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
* pages to the preferred allocation list. If falling
* back for a reclaimable kernel allocation, be more
* aggressive about taking ownership of free pages
+ *
+ * On the other hand, never change migration
+ * type of MIGRATE_CMA pageblocks nor move CMA
+ * pages on different free lists. We don't
+ * want unmovable pages to be allocated from
+ * MIGRATE_CMA areas.
*/
- if (unlikely(current_order >= (pageblock_order >> 1)) ||
- start_migratetype == MIGRATE_RECLAIMABLE ||
- page_group_by_mobility_disabled) {
- unsigned long pages;
+ if (!is_pageblock_cma(page) &&
+ (unlikely(current_order >= pageblock_order / 2) ||
+ start_migratetype == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled)) {
+ int pages;
pages = move_freepages_block(zone, page,
start_migratetype);
@@ -982,11 +1008,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
rmv_page_order(page);
/* Take ownership for orders >= pageblock_order */
- if (current_order >= pageblock_order)
+ if (current_order >= pageblock_order &&
+ !is_pageblock_cma(page))
change_pageblock_range(page, current_order,
start_migratetype);
- expand(zone, page, order, current_order, area, migratetype);
+ expand(zone, page, order, current_order, area,
+ is_migrate_cma(start_migratetype)
+ ? start_migratetype : migratetype);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, migratetype);
@@ -1058,7 +1087,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add(&page->lru, list);
else
list_add_tail(&page->lru, list);
- set_page_private(page, migratetype);
+ if (is_pageblock_cma(page))
+ set_page_private(page, MIGRATE_CMA);
+ else
+ set_page_private(page, migratetype);
list = &page->lru;
}
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1289,8 +1321,12 @@ int split_free_page(struct page *page)
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
- for (; page < endpage; page += pageblock_nr_pages)
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ for (; page < endpage; page += pageblock_nr_pages) {
+ int mt = get_pageblock_migratetype(page);
+ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ set_pageblock_migratetype(page,
+ MIGRATE_MOVABLE);
+ }
}
return 1 << order;
@@ -5580,8 +5616,8 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return true;
-
- if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+ if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
+ is_pageblock_cma(page))
return true;
pfn = page_to_pfn(page);