Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c20
-rw-r--r--mm/fremap.c2
-rw-r--r--mm/pagewalk.c10
-rw-r--r--mm/slab.c5
-rw-r--r--mm/swap.c5
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/vmalloc.c6
7 files changed, 33 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index df343d1e634..07e9d9258b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);
/**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
* @inode: target inode
* @mapping: target address_space
* @pos: beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
sync_page_killable, TASK_KILLABLE);
}
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
*/
@@ -1538,9 +1541,20 @@ repeat:
return page;
}
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
* Same as read_cache_page, but don't wait for page to become unlocked
* after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
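
For readers unfamiliar with the call, a minimal usage sketch follows; demo_read_index() is a hypothetical helper, and the filler cast mirrors what read_mapping_page() does with the mapping's ->readpage:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/pagemap.h>

/* Hypothetical caller: submit the read without waiting, then
 * decide for ourselves when to wait on the page lock. */
static struct page *demo_read_index(struct address_space *mapping,
				    pgoff_t index, struct file *file)
{
	struct page *page;

	page = read_cache_page_async(mapping, index,
			(filler_t *)mapping->a_ops->readpage, file);
	if (IS_ERR(page))
		return page;

	/* read_cache_page() would have waited here for us. */
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return ERR_PTR(-EIO);
	}
	return page;
}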
diff --git a/mm/fremap.c b/mm/fremap.c
index 69a37c2bdf8..07a9c82ce1a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
* mmap()/mremap() it does not create any new vmas. The new mappings are
* also safe across swapout.
*
- * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
* and the vma's default protection is used. Arbitrary protections
* might be implemented in the future.
*/
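
Since the constraint is user-visible, a hedged userspace sketch of remap_file_pages(2) honouring it; move_window() is a hypothetical helper operating on an existing MAP_SHARED mapping:

#define _GNU_SOURCE
#include <sys/mman.h>

/* Rearrange an existing shared file mapping: point the 'len'
 * bytes at 'addr' to file page offset 'pgoff' instead. */
static int move_window(void *addr, size_t len, size_t pgoff)
{
	/* prot must be 0, as the comment above notes; the VMA's
	 * existing protection is used. Non-zero gets -EINVAL. */
	return remap_file_pages(addr, len, 0, pgoff, 0);
}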
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b4f27d22da9..1cf1417ef8b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
/**
* walk_page_range - walk a memory map's page tables with a callback
- * @mm - memory map to walk
- * @addr - starting address
- * @end - ending address
- * @walk - set of callbacks to invoke for each level of the tree
- * @private - private data passed to the callback function
+ * @mm: memory map to walk
+ * @addr: starting address
+ * @end: ending address
+ * @walk: set of callbacks to invoke for each level of the tree
+ * @private: private data passed to the callback function
*
* Recursively walk the page table for the memory area in a VMA,
* calling supplied callbacks. Callbacks are called in-order (first
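
A minimal sketch of the five-argument interface documented above, assuming the struct mm_walk of this era (per-level callbacks, private data passed separately); count_present() and count_pte() are hypothetical:

#include <linux/mm.h>

/* Called once per PTE; 'private' is the pointer handed to
 * walk_page_range() below. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, void *private)
{
	unsigned long *count = private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* non-zero aborts the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = { .pte_entry = count_pte };

	/* Caller is expected to hold mm->mmap_sem. */
	walk_page_range(mm, start, end, &walk, &count);
	return count;
}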
diff --git a/mm/slab.c b/mm/slab.c
index e6c698f5567..bb4070e1079 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);
/**
- * kmem_ptr_validate - check if an untrusted pointer might
- * be a slab entry.
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
* @cachep: the cache we're checking against
* @ptr: pointer to validate
*
- * This verifies that the untrusted pointer looks sane:
+ * This verifies that the untrusted pointer looks sane;
* it is _not_ a guarantee that the pointer is actually
* part of the slab cache in question, but it at least
* validates that the pointer can be dereferenced and
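
To make the "sane, not guaranteed" distinction concrete, a small hypothetical guard; the result only says the pointer can be dereferenced as an object of this cache, not that it is one:

#include <linux/slab.h>

/* Hypothetical check before touching an untrusted pointer. */
static int demo_maybe_slab_obj(struct kmem_cache *cachep, const void *ptr)
{
	/* Non-zero: plausibly a slab entry of cachep; zero: surely not.
	 * Further validation is still required before trusting it. */
	return kmem_ptr_validate(cachep, ptr);
}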
diff --git a/mm/swap.c b/mm/swap.c
index d4ec59aa5c4..aa1139ccf3a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,12 +78,11 @@ void put_page(struct page *page)
EXPORT_SYMBOL(put_page);
/**
- * put_pages_list(): release a list of pages
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
*
* Release a list of pages which are strung together on page.lru. Currently
* used by read_cache_pages() and related error recovery code.
- *
- * @pages: list of pages threaded on page->lru
*/
void put_pages_list(struct list_head *pages)
{
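
As a usage sketch, the error-recovery pattern the comment mentions; demo_abort_read() is hypothetical, and the declaration is assumed to come via linux/mm.h:

#include <linux/mm.h>
#include <linux/list.h>

/* Hypothetical readpages-style error path: instead of walking
 * the list by hand, drop every page still strung on ->lru. */
static int demo_abort_read(struct list_head *pages, int err)
{
	/* Releases one reference per page and empties the list. */
	put_pages_list(pages);
	return err;
}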
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ec42f01a8d0..50757ee3f9f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -115,6 +115,7 @@ void __delete_from_swap_cache(struct page *page)
/**
* add_to_swap - allocate swap space for a page
* @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
*
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
@@ -315,6 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
* @vma: user vma this address belongs to
* @addr: target address for mempolicy
*
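
For the second function, a sketch mirroring how the fault path of this era calls it (compare do_swap_page()); demo_swapin() is a hypothetical wrapper:

#include <linux/swap.h>
#include <linux/mm.h>

static struct page *demo_swapin(swp_entry_t entry,
				struct vm_area_struct *vma,
				unsigned long address)
{
	/* Reads 'entry' plus a few neighbouring swap slots; @vma
	 * and @address steer the mempolicy for the new pages. */
	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, address);
}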
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 950c0be9ca8..ecf91f8034b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -757,7 +757,8 @@ finished:
* @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
*
* This function checks that addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. Will return failure if
@@ -829,7 +830,8 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
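
Closing with a hedged sketch of both documented calls; the demo_* names are hypothetical, and the buffer handed to remap_vmalloc_range() is assumed to have been allocated with vmalloc_user() so it may legally be mapped to userspace:

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/fs.h>

static void *demo_buf;		/* vmalloc_user() buffer, set up elsewhere */
static struct vm_struct *demo_area;

/* Driver mmap() handler: map the whole buffer over the VMA. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* 0 on success, -Exxx on failure, as documented above. */
	return remap_vmalloc_range(vma, demo_buf, 0);
}

/* Reserve kernel address space plus pagetables, no backing pages. */
static int demo_reserve(void)
{
	demo_area = alloc_vm_area(PAGE_SIZE);
	return demo_area ? 0 : -ENOMEM;
}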