Commit 7682486b authored by Randy Dunlap, committed by Linus Torvalds

mm: fix various kernel-doc comments

Fix various kernel-doc notation in mm/:

filemap.c: add function short description; convert 2 to kernel-doc
fremap.c: change parameter 'prot' to @prot
pagewalk.c: change "-" in function parameters to ":"
slab.c: fix short description of kmem_ptr_validate()
swap.c: fix description & parameters of put_pages_list()
swap_state.c: fix function parameters
vmalloc.c: change "@returns" to "Returns:" since that is not a parameter
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 6cb2a210
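For reference, the kernel-doc layout this patch moves everything toward looks like the sketch below. The helper is hypothetical (example_count_bits is not in the kernel or in this patch); it only illustrates the conventions being fixed: a one-line short description after the dash, each parameter written as "@name:" with a colon, and a "Returns:" line instead of a bogus "@returns" parameter.

/**
 * example_count_bits - count the set bits in a word (hypothetical helper)
 * @word: value to examine
 * @max: highest bit position to consider (at most BITS_PER_LONG)
 *
 * Scans @word from bit 0 up to, but not including, @max and counts how
 * many bits are set.  Note the short description after the dash, the
 * "@name:" form for each parameter, and the "Returns:" line below, which
 * is not written as a parameter.
 *
 * Returns: the number of set bits found, or 0 if @max is 0.
 */
static unsigned int example_count_bits(unsigned long word, unsigned int max)
{
	unsigned int i, count = 0;

	for (i = 0; i < max; i++)
		if (word & (1UL << i))
			count++;
	return count;
}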
mm/filemap.c
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode: target inode
  * @mapping: target address_space
  * @pos: beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
 					sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
@@ -1538,9 +1541,20 @@ static struct page *__read_cache_page(struct address_space *mapping,
 	return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
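As a rough usage sketch of the read_cache_page_async() behaviour documented above (not part of the patch; the wrapper name and the error-handling comments are assumptions), a caller of this era might look like this:

#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * Hypothetical wrapper, for illustration only: start filling one page of
 * the page cache without waiting for the filler's I/O to complete.
 */
static struct page *example_start_read(struct address_space *mapping,
				       pgoff_t index,
				       int (*filler)(void *, struct page *),
				       void *data)
{
	struct page *page;

	page = read_cache_page_async(mapping, index, filler, data);
	if (IS_ERR(page))
		return page;	/* e.g. ERR_PTR(-ENOMEM) on allocation failure */

	/*
	 * The page may still be locked while the read is in flight.  A
	 * caller that needs the data immediately would wait for the lock
	 * and then treat !PageUptodate() as -EIO, which is what the
	 * synchronous read_cache_page() variant does.
	 */
	return page;
}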
mm/fremap.c
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
  * mmap()/mremap() it does not create any new vmas. The new mappings are
  * also safe across swapout.
  *
- * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
  * and the vma's default protection is used. Arbitrary protections
  * might be implemented in the future.
  */
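The NOTE above concerns the prot argument of the remap_file_pages() system call, so a userspace caller passes 0 for it. A minimal sketch, assuming a test file of at least two pages (not taken from the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("testfile", O_RDONLY);	/* assumed test file */
	char *addr;

	if (fd < 0)
		return 1;

	/* A shared mapping is required for remap_file_pages(). */
	addr = mmap(NULL, 2 * pagesize, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/* Make file page 1 appear at the start of the window; prot is 0
	 * because, as the comment says, it is ignored but must be zero. */
	if (remap_file_pages(addr, pagesize, 0, 1, 0))
		perror("remap_file_pages");

	munmap(addr, 2 * pagesize);
	close(fd);
	return 0;
}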
mm/pagewalk.c
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm - memory map to walk
- * @addr - starting address
- * @end - ending address
- * @walk - set of callbacks to invoke for each level of the tree
- * @private - private data passed to the callback function
+ * @mm: memory map to walk
+ * @addr: starting address
+ * @end: ending address
+ * @walk: set of callbacks to invoke for each level of the tree
+ * @private: private data passed to the callback function
  *
  * Recursively walk the page table for the memory area in a VMA,
  * calling supplied callbacks. Callbacks are called in-order (first
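A hedged sketch of how walk_page_range() might be used, counting present PTEs in a range. The mm_walk struct layout and callback signatures shown are assumptions recalled from this era's API, and count_pte/count_present_pages are invented names:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Assumed callback shape for this era's mm_walk.pte_entry. */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     void *private)
{
	unsigned long *nr_present = private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;		/* a non-zero return would stop the walk */
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk walk = { .pte_entry = count_pte };

	/* @walk supplies per-level callbacks; @private is handed through
	 * to each of them unchanged.  Callers are expected to hold the
	 * appropriate mmap_sem protection (an assumption here). */
	walk_page_range(mm, start, end, &walk, &nr_present);
	return nr_present;
}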
mm/slab.c
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_ptr_validate - check if an untrusted pointer might
- * be a slab entry.
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
  * @ptr: pointer to validate
  *
- * This verifies that the untrusted pointer looks sane:
+ * This verifies that the untrusted pointer looks sane;
  * it is _not_ a guarantee that the pointer is actually
  * part of the slab cache in question, but it at least
  * validates that the pointer can be dereferenced and
mm/swap.c
@@ -78,12 +78,11 @@ void put_page(struct page *page)
 EXPORT_SYMBOL(put_page);
 
 /**
- * put_pages_list(): release a list of pages
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
  *
  * Release a list of pages which are strung together on page.lru. Currently
  * used by read_cache_pages() and related error recovery code.
- *
- * @pages: list of pages threaded on page->lru
  */
 void put_pages_list(struct list_head *pages)
 {
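A small sketch of the usage documented above (not from the patch): thread pages together on their lru members and release the whole batch. The header choices and the assumption that each page already carries a reference we own are mine:

#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical: both pages are assumed to hold a reference we own. */
static void example_drop_pair(struct page *a, struct page *b)
{
	LIST_HEAD(pages);

	/* Thread the pages together on their lru members... */
	list_add(&a->lru, &pages);
	list_add(&b->lru, &pages);

	/* ...and release the whole batch; put_pages_list() unlinks each
	 * page and drops its reference. */
	put_pages_list(&pages);
}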
mm/swap_state.c
@@ -115,6 +115,7 @@ void __delete_from_swap_cache(struct page *page)
 /**
  * add_to_swap - allocate swap space for a page
  * @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
  *
  * Allocate swap space for the page and add the page to the
  * swap cache. Caller needs to hold the page lock.
@@ -315,6 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
  * @vma: user vma this address belongs to
  * @addr: target address for mempolicy
  *
mm/vmalloc.c
@@ -757,7 +757,8 @@ long vwrite(char *buf, char *addr, unsigned long count)
  * @vma: vma to cover (map full range of vma)
  * @addr: vmalloc memory
  * @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
  *
  * This function checks that addr is a valid vmalloc'ed area, and
  * that it is big enough to cover the vma. Will return failure if
@@ -829,7 +830,8 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 /**
  * alloc_vm_area - allocate a range of kernel address space
  * @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
  *
  * This function reserves a range of kernel address space, and
  * allocates pagetables to map that range. No actual mappings
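Finally, a hedged sketch of alloc_vm_area() as documented above (not from the patch; the wrapper and the free_vm_area() pairing mentioned in the comment are assumptions):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical caller: reserve kernel address space with page tables
 * allocated, but nothing mapped into it yet. */
static struct vm_struct *example_reserve(unsigned long nr_pages)
{
	struct vm_struct *area;

	area = alloc_vm_area(nr_pages * PAGE_SIZE);
	if (!area)
		return NULL;	/* NULL on failure, as documented */

	/*
	 * The caller installs real mappings into the reserved range at
	 * area->addr before using it, and (assumption) tears it down
	 * with free_vm_area() when done.
	 */
	return area;
}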