Commit 0d74f86f authored by Konrad Rzeszutek Wilk, committed by Dave Airlie

ttm: Fix spelling mistakes and remove unused #ifdef

Also clarify some comments to make the code easier to understand.

Acked-by: Randy Dunlap <randy.dunlap@oracle.com>
[v2: Added some more updates from Randy Dunlap]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 20633442
@@ -355,7 +355,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 			if (nr_free)
 				goto restart;
-			/* Not allowed to fall tough or break because
+			/* Not allowed to fall through or break because
 			 * following context is inside spinlock while we are
 			 * outside here.
 			 */
@@ -556,7 +556,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 }
 /**
- * Fill the given pool if there isn't enough pages and requested number of
+ * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 	pool->fill_lock = true;
-	/* If allocation request is small and there is not enough
-	 * pages in pool we fill the pool first */
+	/* If allocation request is small and there are not enough
+	 * pages in a pool we fill the pool up first. */
 	if (count < _manager->options.small
 		&& count > pool->npages) {
 		struct list_head new_pages;
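The comment corrected above describes when ttm_page_pool_fill_locked() refills a pool: only a small request that exceeds what the pool currently holds triggers a refill. A minimal userspace sketch of that decision, using hypothetical stand-in structures (fake_pool, fake_options) rather than the real TTM types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for pool->npages and _manager->options; these
 * are not the kernel structures. */
struct fake_pool {
	unsigned npages;	/* pages currently cached in the pool */
};

struct fake_options {
	unsigned small;		/* requests below this count as "small" */
	unsigned alloc_size;	/* how many pages one refill would add */
};

/* Refill only when the request is small but the pool cannot cover it. */
static bool should_fill(const struct fake_pool *pool,
			const struct fake_options *opt, unsigned count)
{
	return count < opt->small && count > pool->npages;
}

int main(void)
{
	struct fake_pool pool = { .npages = 3 };
	struct fake_options opt = { .small = 16, .alloc_size = 64 };

	/* 8 pages: a small request, the pool holds only 3, so refill first. */
	printf("refill before request of 8?   %s\n",
	       should_fill(&pool, &opt, 8) ? "yes" : "no");
	/* 100 pages: not "small", so the refill path is skipped. */
	printf("refill before request of 100? %s\n",
	       should_fill(&pool, &opt, 100) ? "yes" : "no");
	return 0;
}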
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 /**
- * Cut count nubmer of pages from the pool and put them to return list
+ * Cut 'count' number of pages from the pool and put them on the return list.
  *
- * @return count of pages still to allocate to fill the request.
+ * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		goto out;
 	}
 	/* find the last pages to include for requested number of pages. Split
-	 * pool to begin and halves to reduce search space. */
+	 * pool to begin and halve it to reduce search space. */
 	if (count <= pool->npages/2) {
 		i = 0;
 		list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 			break;
 		}
 	}
-	/* Cut count number of pages from pool */
+	/* Cut 'count' number of pages from the pool */
 	list_cut_position(pages, &pool->list, p);
 	pool->npages -= count;
 	count = 0;
......
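The two comments fixed in the hunks above describe ttm_page_pool_get_pages(): find the boundary entry for the requested count (scanning from whichever end of the pool is closer, so at most half the list is walked), then move everything up to it onto the caller's list with list_cut_position(). The sketch below re-creates a tiny kernel-style circular list in userspace to show that cut operation; it mirrors the names of <linux/list.h> but is not the kernel implementation:

#include <stdio.h>

/* Minimal userspace circular doubly linked list, just enough to show
 * what list_cut_position() does for the pool: everything from the list
 * head up to and including a chosen entry moves to a second list in O(1). */
struct node {
	struct node *prev, *next;
	int id;			/* stands in for a cached page */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Move head..entry (inclusive) from 'list' onto 'out'; 'out' must be empty. */
static void list_cut_position(struct node *out, struct node *list,
			      struct node *entry)
{
	struct node *first = list->next;

	/* splice first..entry behind 'out' */
	out->next = first;
	first->prev = out;
	out->prev = entry;
	/* reconnect the remainder of 'list' */
	list->next = entry->next;
	entry->next->prev = list;
	entry->next = out;
}

int main(void)
{
	struct node pool, taken, pages[5];
	struct node *p;
	unsigned count = 3, i;

	list_init(&pool);
	list_init(&taken);
	for (i = 0; i < 5; i++) {
		pages[i].id = i;
		list_add_tail(&pool, &pages[i]);
	}

	/* Walk to the count-th entry, then cut in one go, mirroring the
	 * "find the last page to include, then list_cut_position()" pattern. */
	p = pool.next;
	for (i = 1; i < count; i++)
		p = p->next;
	list_cut_position(&taken, &pool, p);

	printf("taken:");
	for (p = taken.next; p != &taken; p = p->next)
		printf(" %d", p->id);
	printf("\nleft in pool:");
	for (p = pool.next; p != &pool; p = p->next)
		printf(" %d", p->id);
	printf("\n");
	return 0;
}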
@@ -662,9 +662,6 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
 extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-#if 0
-#endif
 /**
  * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
  *
......
@@ -78,7 +78,7 @@ struct ttm_backend_func {
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
-	 * differences between aperture- and system page sizes.
+	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
@@ -88,7 +88,7 @@ struct ttm_backend_func {
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
-	 * able to handle differences between aperture- and system page sizes.
+	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);
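Both callbacks documented above are expected to cope with an aperture whose page size differs from the CPU page size. A small, self-contained illustration of the bookkeeping that implies; the sizes are made up for the example (the 64 KiB aperture entry is not taken from any real backend):

#include <stdio.h>

#define SYSTEM_PAGE_SIZE   4096u	/* e.g. PAGE_SIZE on x86 */
#define APERTURE_PAGE_SIZE 65536u	/* e.g. a hypothetical 64 KiB GART entry */

int main(void)
{
	unsigned num_system_pages = 32;
	unsigned per_entry = APERTURE_PAGE_SIZE / SYSTEM_PAGE_SIZE;
	/* Round up: a partially used aperture entry still occupies a slot. */
	unsigned aperture_entries =
		(num_system_pages + per_entry - 1) / per_entry;

	printf("%u system pages of %u bytes need %u aperture entries of %u bytes\n",
	       num_system_pages, SYSTEM_PAGE_SIZE,
	       aperture_entries, APERTURE_PAGE_SIZE);
	return 0;
}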
@@ -786,7 +786,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * ttm_bo_device_init
  *
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @mem_global: A pointer to an initialized struct ttm_mem_global.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
  * @file_page_offset: Offset into the device address space that is available
  * for buffer data. This ensures compatibility with other users of the
......
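For context on the @glob fix above: in this era of the API a driver passes its struct ttm_bo_global to ttm_bo_device_init() along with its ttm_bo_driver and a file page offset. The fragment below follows the pattern drivers such as radeon used at the time; the mydrv_* names are hypothetical and the exact argument list (notably the trailing need_dma32 flag) is recalled from memory, so treat it as an assumption rather than a quotation of the header:

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver glue; .bind/.unbind etc. would be filled elsewhere. */
static struct ttm_bo_driver mydrv_bo_driver;

/* Reserve the low part of the device address space for other users,
 * as the @file_page_offset comment above describes. */
#define MYDRV_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

int mydrv_ttm_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
		   bool need_dma32)
{
	/* @bdev is the device to initialize, @glob the already-initialized
	 * struct ttm_bo_global referred to by the corrected comment. */
	return ttm_bo_device_init(bdev, glob, &mydrv_bo_driver,
				  MYDRV_FILE_PAGE_OFFSET, need_dma32);
}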
@@ -41,7 +41,7 @@
  * @do_shrink: The callback function.
  *
  * Arguments to the do_shrink functions are intended to be passed using
- * inheritance. That is, the argument class derives from struct ttm_mem_srink,
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
  * and can be accessed using container_of().
  */
......
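The inheritance-by-embedding pattern mentioned above (derive from struct ttm_mem_shrink, recover the derived object with container_of()) is a common kernel idiom. A userspace sketch with a local copy of the macro and hypothetical my_* types standing in for the TTM structures:

#include <stdio.h>
#include <stddef.h>

/* Local copy of the kernel macro, so the example builds in userspace. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_shrink {			/* plays the role of ttm_mem_shrink */
	int (*do_shrink)(struct my_shrink *shrink);
};

struct my_driver {			/* the "derived" class */
	int pool_pages;
	struct my_shrink shrink;	/* embedded base object */
};

static int my_do_shrink(struct my_shrink *shrink)
{
	/* Recover the enclosing object from the embedded base pointer. */
	struct my_driver *drv = container_of(shrink, struct my_driver, shrink);

	drv->pool_pages /= 2;		/* pretend to release half the pool */
	return drv->pool_pages;
}

int main(void)
{
	struct my_driver drv = { .pool_pages = 128 };

	drv.shrink.do_shrink = my_do_shrink;
	/* The memory accountant only ever sees &drv.shrink. */
	printf("pages left after shrink: %d\n",
	       drv.shrink.do_shrink(&drv.shrink));
	return 0;
}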
@@ -111,7 +111,7 @@ struct ttm_object_device;
  *
  * @ref_obj_release: A function to be called when a reference object
  * with another ttm_ref_type than TTM_REF_USAGE is deleted.
- * this function may, for example, release a lock held by a user-space
+ * This function may, for example, release a lock held by a user-space
  * process.
  *
  * This struct is intended to be used as a base struct for objects that
@@ -172,7 +172,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
 /**
  * ttm_base_object_unref
  *
- * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
  *
  * Decrements the base object refcount and clears the pointer pointed to by
  * p_base.
......
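ttm_base_object_unref() takes a pointer to a pointer precisely so it can clear the caller's reference while dropping the refcount. The sketch below shows the same convention with a hypothetical my_obj type; it illustrates the idiom, not the TTM implementation:

#include <stdio.h>
#include <stdlib.h>

struct my_obj {
	int refcount;
};

/* Drop one reference and clear the caller's pointer, so a stale pointer
 * cannot be used after the object may have been freed. */
static void my_obj_unref(struct my_obj **p_obj)
{
	struct my_obj *obj = *p_obj;

	*p_obj = NULL;			/* clear the caller's pointer first */
	if (--obj->refcount == 0) {
		printf("last reference dropped, freeing object\n");
		free(obj);
	}
}

int main(void)
{
	struct my_obj *a = malloc(sizeof(*a));
	struct my_obj *b;

	a->refcount = 1;
	b = a;
	a->refcount++;			/* b takes a second reference */

	my_obj_unref(&a);		/* a is cleared, object survives */
	printf("a after unref: %p\n", (void *)a);
	my_obj_unref(&b);		/* last reference: object is freed */
	return 0;
}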
@@ -32,7 +32,7 @@
 /**
  * Get count number of pages from pool to pages list.
  *
- * @pages: heado of empty linked list where pages are filled.
+ * @pages: head of empty linked list where pages are filled.
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
......