author    Andy Whitcroft <apw@canonical.com>    2010-07-29 09:46:41 +0100
committer John Rigby <john.rigby@linaro.org>    2012-06-22 16:12:07 -0600
commit    4076aa5aa4f44570a8115bb8bb32e94205d018d5 (patch)
tree      5cd622d5576764dd5f972209a32bd5de947e4170 /mm
parent    322a7410e4b977a43f69961bc2838980c4f1062f (diff)
UBUNTU: SAUCE: (no-up) add tracing for user initiated readahead requests
Track pages which undergo readahead and, for each, record which were actually
consumed, either via read or by being faulted into a map.  This allows
userspace readahead applications (such as ureadahead) to track which of the
pages in core at the end of a boot are actually required and to generate an
optimal readahead pack.  It also allows pack adjustment and optimisation in
parallel with readahead, allowing the pack to evolve to remain accurate as
userspace paths change.

The status of the pages is reported back via the mincore() call using a newly
allocated bit.

Signed-off-by: Andy Whitcroft <apw@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>
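A minimal userspace sketch (not part of this patch) of how a readahead tracker
such as ureadahead might consume the new bit: it mmaps a file and walks the
mincore() vector, assuming bit 7 (0x80) of each vector byte carries the
"readahead unused" status as wired up in the mm/mincore.c hunk below; the
default file path is purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
        /* Illustrative default; pass any regular file as argv[1]. */
        const char *path = argc > 1 ? argv[1] : "/bin/ls";
        int fd = open(path, O_RDONLY);
        if (fd < 0)
                return 1;

        struct stat st;
        if (fstat(fd, &st) < 0 || st.st_size == 0)
                return 1;

        long psize = sysconf(_SC_PAGESIZE);
        size_t npages = (st.st_size + psize - 1) / psize;

        void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        unsigned char *vec = malloc(npages);
        if (map == MAP_FAILED || !vec)
                return 1;

        if (mincore(map, st.st_size, vec) == 0) {
                for (size_t i = 0; i < npages; i++) {
                        int resident = vec[i] & 0x01;   /* standard mincore() residency bit */
                        int unused   = vec[i] & 0x80;   /* assumed readahead-unused bit */
                        printf("page %zu: %s%s\n", i,
                               resident ? "resident" : "not resident",
                               unused ? " (readahead, never consumed)" : "");
                }
        }

        free(vec);
        munmap(map, st.st_size);
        close(fd);
        return 0;
}

On an unpatched kernel bit 7 is simply never set, so the same program degrades
to a plain residency check.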
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     3
-rw-r--r--  mm/memory.c      7
-rw-r--r--  mm/mincore.c     2
-rw-r--r--  mm/page_alloc.c  1
-rw-r--r--  mm/readahead.c   1
5 files changed, 13 insertions, 1 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index a4a5260b027..c97ce2627a8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1304,6 +1304,9 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
         if (size > count)
                 size = count;
 
+        if (PageReadaheadUnused(page))
+                ClearPageReadaheadUnused(page);
+
         /*
          * Faults on the destination of a read are common, so do it before
          * taking the kmap.
diff --git a/mm/memory.c b/mm/memory.c
index 1b7dc662bf9..fcee128452b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3231,10 +3231,15 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         else
                 VM_BUG_ON(!PageLocked(vmf.page));
 
+        page = vmf.page;
+
+        /* Mark the page as used on fault. */
+        if (PageReadaheadUnused(page))
+                ClearPageReadaheadUnused(page);
+
         /*
          * Should we do an early C-O-W break?
          */
-        page = vmf.page;
         if (flags & FAULT_FLAG_WRITE) {
                 if (!(vma->vm_flags & VM_SHARED)) {
                         page = cow_page;
diff --git a/mm/mincore.c b/mm/mincore.c
index 936b4cee8cb..7c2874a9a73 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -80,6 +80,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 #endif
         if (page) {
                 present = PageUptodate(page);
+                if (present)
+                        present |= (PageReadaheadUnused(page) << 7);
                 page_cache_release(page);
         }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44030096da6..e3ef8cf13b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5970,6 +5970,7 @@ static const struct trace_print_flags pageflag_names[] = {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
         {1UL << PG_compound_lock,       "compound_lock" },
 #endif
+        {1UL << PG_readaheadunused,     "readaheadunused"},
 };
 
 static void dump_page_flags(unsigned long flags)
diff --git a/mm/readahead.c b/mm/readahead.c
index ea8f8fa2164..3de0a197573 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -189,6 +189,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                 list_add(&page->lru, &page_pool);
                 if (page_idx == nr_to_read - lookahead_size)
                         SetPageReadahead(page);
+                SetPageReadaheadUnused(page);
                 ret++;
         }
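
The PageReadaheadUnused()/SetPageReadaheadUnused()/ClearPageReadaheadUnused()
accessors and the PG_readaheadunused bit used above are not defined anywhere
in the mm/ portion of the diff; a companion change outside mm/ (presumably to
include/linux/page-flags.h) has to declare them. A rough sketch of what that
declaration could look like with the kernel's PAGEFLAG() helper, with the
names assumed from the accessors used in the hunks above:

/* Sketch only; the real declarations live outside the mm/ diffstat shown here. */
enum pageflags {
        /* ... existing PG_* flags ... */
        PG_readaheadunused,     /* readahead page, not yet read or faulted */
};

/*
 * PAGEFLAG(ReadaheadUnused, readaheadunused) generates PageReadaheadUnused(),
 * SetPageReadaheadUnused() and ClearPageReadaheadUnused() as test/set/clear
 * operations on the PG_readaheadunused bit of page->flags.
 */
PAGEFLAG(ReadaheadUnused, readaheadunused)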