提交 c944a308 编写于 作者: C Chris Wilson

drm/i915: Implement dma_buf_ops->kmap

Since kmap allows us to block we can pin the pages and use our normal
page lookup routine making the implementation simple, or as some might
say quick and dirty.

Testcase: igt/drv_selftest/dmabuf
Testcase: igt/prime_rw
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170503202517.16797-1-chris@chris-wilson.co.uk
上级 ad15f74a
......@@ -122,12 +122,36 @@ static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long
}
/*
 * Map a single page of the exported object into kernel address space.
 *
 * Returns the kmap'd virtual address on success, or NULL if the page
 * index is out of range, the object is not backed by struct pages, or
 * the backing store cannot be pinned. On success the object's pages
 * remain pinned until the matching .kunmap call.
 */
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	/* Reject out-of-range indices, page-less objects, and pin failures. */
	if (page_num >= obj->base.size >> PAGE_SHIFT ||
	    !i915_gem_object_has_struct_page(obj) ||
	    i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page)) {
		i915_gem_object_unpin_pages(obj);
		return NULL;
	}

	return kmap(page);
}
/*
 * Undo i915_gem_dmabuf_kmap(): release the kernel mapping of the page
 * containing @addr, then drop the page pin taken by the kmap call.
 */
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(dma_buf_to_obj(dma_buf));
}
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
......
......@@ -271,6 +271,105 @@ static int igt_dmabuf_export_vmap(void *arg)
return err;
}
/*
 * Selftest for the dma-buf .kmap/.kunmap hooks on an exported i915 object.
 *
 * Exercises: freshly-created pages read back as zero; writes through the
 * kmap mapping stick across remaps; writes made through the driver's own
 * CPU mapping (pin_map) are visible through dma_buf_kmap; and out-of-range
 * page indices (one past the end, and (unsigned long)-1) are rejected
 * with NULL rather than mapped.
 *
 * Returns 0 on success or a negative errno on failure.
 * Fixed defect: error message typo "initialiased" -> "initialised".
 */
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	/* Two pages so we can test both a mapped page and the bounds check */
	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	/* The dmabuf now holds its own reference; drop ours. */
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	/* New bo pages must be zeroed before being handed to userspace */
	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialised to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	/* Write page[1] via the driver's own CPU mapping... */
	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	/* ...and check it is visible through dma_buf_kmap. */
	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	/* page[0] must still hold the pattern written before the remap. */
	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	/* One page past the end of the 2-page object must be rejected. */
	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	/* -1 wraps to ULONG_MAX and must fail the bounds check too. */
	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}
int i915_gem_dmabuf_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
......@@ -279,6 +378,7 @@ int i915_gem_dmabuf_mock_selftests(void)
SUBTEST(igt_dmabuf_import),
SUBTEST(igt_dmabuf_import_ownership),
SUBTEST(igt_dmabuf_export_vmap),
SUBTEST(igt_dmabuf_export_kmap),
};
struct drm_i915_private *i915;
int err;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册