Commit f2a85e19 authored by Chris Wilson

drm,i915: Introduce drm_malloc_gfp()

I have instances where I want to use drm_malloc_ab() but with a custom
gfp mask. And with those, where I want a temporary allocation, I want to
try a high-order kmalloc() before using a vmalloc().

So refactor my usage into drm_malloc_gfp().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1460113874-17366-6-git-send-email-chris@chris-wilson.co.uk
Parent eae2c43b
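For context, the sketch below shows the calling pattern this patch converts to; it is illustrative only. drm_malloc_gfp(), drm_free_large() and GFP_TEMPORARY are the helpers and flags used by the diff that follows, while the wrapper functions and their names here are hypothetical.

#include <drm/drm_mem_util.h>   /* drm_malloc_gfp(), drm_free_large() */
#include <linux/gfp.h>          /* GFP_TEMPORARY */
#include <linux/mm_types.h>     /* struct page */

/*
 * Hypothetical helper mirroring the converted i915 call sites: allocate a
 * temporary array of page pointers in one call, instead of open-coding a
 * kmalloc() attempt followed by a drm_malloc_ab() fallback.
 */
static struct page **alloc_temp_page_array(unsigned long npages)
{
        /*
         * Requests up to PAGE_SIZE come straight from kmalloc().  For larger
         * requests, GFP_TEMPORARY is a reclaimable mask, so drm_malloc_gfp()
         * first tries a high-order kmalloc() (with __GFP_NOWARN and
         * __GFP_NORETRY) and only falls back to __vmalloc() if that fails.
         */
        return drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
}

/*
 * The array is released with drm_free_large(), which uses kvfree() and so
 * handles both the kmalloc() and the vmalloc() case.
 */
static void free_temp_page_array(struct page **pages)
{
        drm_free_large(pages);
}

Compare this single-call pattern with the removed two-step allocations in the hunks below.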
@@ -2423,9 +2423,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
                 int n;
 
                 n = obj->base.size >> PAGE_SHIFT;
-                pages = kmalloc(n*sizeof(*pages), GFP_TEMPORARY | __GFP_NOWARN);
-                if (pages == NULL)
-                        pages = drm_malloc_ab(n, sizeof(*pages));
+                pages = drm_malloc_gfp(n, sizeof(*pages), GFP_TEMPORARY);
                 if (pages != NULL) {
                         n = 0;
                         for_each_sg_page(obj->pages->sgl, &sg_iter,
...
@@ -1783,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
-        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
-                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-        if (exec2_list == NULL)
-                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
-                                           args->buffer_count);
+        exec2_list = drm_malloc_gfp(args->buffer_count,
+                                    sizeof(*exec2_list),
+                                    GFP_TEMPORARY);
         if (exec2_list == NULL) {
                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                           args->buffer_count);
...
@@ -3401,8 +3401,9 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
         int ret = -ENOMEM;
 
         /* Allocate a temporary list of source pages for random access. */
-        page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
-                                       sizeof(dma_addr_t));
+        page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+                                        sizeof(dma_addr_t),
+                                        GFP_TEMPORARY);
         if (!page_addr_list)
                 return ERR_PTR(ret);
...
@@ -494,10 +494,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
         ret = -ENOMEM;
         pinned = 0;
 
-        pvec = kmalloc(npages*sizeof(struct page *),
-                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-        if (pvec == NULL)
-                pvec = drm_malloc_ab(npages, sizeof(struct page *));
+        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
         if (pvec != NULL) {
                 struct mm_struct *mm = obj->userptr.mm->mm;
 
@@ -634,15 +631,12 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         pvec = NULL;
         pinned = 0;
         if (obj->userptr.mm->mm == current->mm) {
-                pvec = kmalloc(num_pages*sizeof(struct page *),
-                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-                if (pvec == NULL) {
-                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+                pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+                                      GFP_TEMPORARY);
                 if (pvec == NULL) {
                         __i915_gem_userptr_set_active(obj, false);
                         return -ENOMEM;
                 }
-                }
 
                 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                                !obj->userptr.read_only, pvec);
...
@@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
                          GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 
+static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
+{
+        if (size != 0 && nmemb > SIZE_MAX / size)
+                return NULL;
+
+        if (size * nmemb <= PAGE_SIZE)
+                return kmalloc(nmemb * size, gfp);
+
+        if (gfp & __GFP_RECLAIMABLE) {
+                void *ptr = kmalloc(nmemb * size,
+                                    gfp | __GFP_NOWARN | __GFP_NORETRY);
+                if (ptr)
+                        return ptr;
+        }
+
+        return __vmalloc(size * nmemb,
+                         gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
 static __inline void drm_free_large(void *ptr)
 {
         kvfree(ptr);
...