author     Andy Whitcroft <apw@canonical.com>   2010-07-29 09:46:41 +0100
committer  John Rigby <john.rigby@linaro.org>   2012-11-14 18:19:19 -0700
commit     156cf94d200395c7f551ebf64a06606b1aa7721e (patch)
tree       d9566d217e912f14a2cdd78dad6e1394967588b9 /mm
parent     79e2b55e34abd35295e7703fab3a42c9c2828de8 (diff)
UBUNTU: SAUCE: (no-up) add tracing for user initiated readahead requests
Track pages which undergo readahead and, for each, record whether it was actually consumed, either via a read or by being faulted into a mapping. This allows userspace readahead applications (such as ureadahead) to track which of the pages in core at the end of a boot are actually required and to generate an optimal readahead pack. It also allows the pack to be adjusted and optimised in parallel with readahead, so that it remains accurate as userspace paths change. The status of the pages is reported back via the mincore() call using a newly allocated bit.

Signed-off-by: Andy Whitcroft <apw@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>
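As a rough illustration of how a userspace consumer might read the new bit, the sketch below mmap()s a file, calls mincore(), and tests bit 7 of each vector byte, which the mm/mincore.c hunk below ORs in for resident pages that are still marked readahead-unused. This program is not part of the patch; the example path, error handling and output are illustrative assumptions only.

/* Illustrative userspace sketch (not part of this patch): report pages
 * that are resident but still marked readahead-unused. Bit 0 of each
 * mincore() vector byte is the standard "resident" bit; bit 7 is the
 * bit this patch allocates for unconsumed readahead pages. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/ls";	/* example file */
	long page_size = sysconf(_SC_PAGESIZE);
	struct stat st;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	size_t npages = (st.st_size + page_size - 1) / page_size;
	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	unsigned char *vec = malloc(npages);

	if (map == MAP_FAILED || vec == NULL)
		return 1;

	if (mincore(map, st.st_size, vec) == 0) {
		for (size_t i = 0; i < npages; i++) {
			int resident = vec[i] & 0x01;		/* standard mincore bit */
			int ra_unused = (vec[i] & 0x80) != 0;	/* bit added by this patch */

			if (resident && ra_unused)
				printf("page %zu: read ahead but not yet consumed\n", i);
		}
	}

	free(vec);
	munmap(map, st.st_size);
	close(fd);
	return 0;
}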
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     3
-rw-r--r--  mm/memory.c      7
-rw-r--r--  mm/mincore.c     2
-rw-r--r--  mm/page_alloc.c  1
-rw-r--r--  mm/readahead.c   1
5 files changed, 13 insertions, 1 deletion
diff --git a/mm/filemap.c b/mm/filemap.c
index 83efee76a5c..d1c13c279a6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1304,6 +1304,9 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	if (size > count)
 		size = count;
 
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Faults on the destination of a read are common, so do it before
 	 * taking the kmap.
diff --git a/mm/memory.c b/mm/memory.c
index fb135ba4aba..2cd00c76fdf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3265,10 +3265,15 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
 
+	page = vmf.page;
+
+	/* Mark the page as used on fault. */
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	page = vmf.page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
 			page = cow_page;
diff --git a/mm/mincore.c b/mm/mincore.c
index 936b4cee8cb..7c2874a9a73 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -80,6 +80,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 #endif
 	if (page) {
 		present = PageUptodate(page);
+		if (present)
+			present |= (PageReadaheadUnused(page) << 7);
 		page_cache_release(page);
 	}
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6702e..f9c93feba44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6056,6 +6056,7 @@ static const struct trace_print_flags pageflag_names[] = {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	{1UL << PG_compound_lock,	"compound_lock"	},
 #endif
+	{1UL << PG_readaheadunused,	"readaheadunused"},
 };
 
 static void dump_page_flags(unsigned long flags)
diff --git a/mm/readahead.c b/mm/readahead.c
index 7963f239123..0f3f7b9f32c 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -189,6 +189,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		list_add(&page->lru, &page_pool);
 		if (page_idx == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
+		SetPageReadaheadUnused(page);
 		ret++;
 	}
 
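Note that the diffstat above is limited to 'mm', so the PG_readaheadunused flag and the PageReadaheadUnused()/SetPageReadaheadUnused()/ClearPageReadaheadUnused() accessors used by these hunks are declared elsewhere in the full patch. On kernels of this era that declaration would normally live in include/linux/page-flags.h and look roughly like the following sketch; this is an assumption for illustration, not part of the diff shown here.

/* Sketch of the page-flags.h side assumed by the hunks above (not shown
 * in this 'mm'-limited diff): a new page flag plus its generated
 * Page/SetPage/ClearPage accessors. */
enum pageflags {
	/* ... existing flags ... */
	PG_readaheadunused,	/* read ahead, not yet consumed */
	__NR_PAGEFLAGS,
};

/* PAGEFLAG() generates PageReadaheadUnused(), SetPageReadaheadUnused()
 * and ClearPageReadaheadUnused() from PG_readaheadunused. */
PAGEFLAG(ReadaheadUnused, readaheadunused)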