Commit 465ed660 authored by Prathyush K, committed by Inki Dae

drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer

Changelog v2:

Removed redundant check for invalid sgl.
Added check for valid page_offset in the beginning of exynos_drm_gem_map_buf.

Changelog v1:

The 'pages' structure is not required since we can use the 'sgt'. Even for
CONTIG buffers, an SGT is created (which will have just one sgl). This SGT
can be used during mmap instead of 'pages'. The 'page_size' element of the
structure is also not used anywhere and is removed.
This patch also fixes a memory leak where the 'pages' structure was
allocated during gem buffer allocation but never freed during deallocation.
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Parent dd265850
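The core observation in the changelog above is that a physically contiguous (CONTIG) allocation can still be described by an sg_table with a single scatterlist entry, so 'sgt' alone covers both buffer types. Below is a minimal illustrative sketch, not part of the patch, of how such a one-entry table could be built with the standard scatterlist API; the helper name contig_region_to_sgt is hypothetical.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

/*
 * Hypothetical helper, for illustration only: wrap one physically
 * contiguous region in an sg_table holding a single scatterlist
 * entry. Because such a table exists for CONTIG buffers too, the
 * driver can drop its separate 'pages' array and use 'sgt' everywhere.
 */
static struct sg_table *contig_region_to_sgt(struct page *first_page,
					     unsigned int size)
{
	struct sg_table *sgt;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* One entry suffices: the region is physically contiguous. */
	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	sg_set_page(sgt->sgl, first_page, size, 0);
	return sgt;
}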
drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -34,8 +34,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
 	int ret = 0;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -73,22 +71,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		goto err_free_sgt;
 	}
 
-	npages = buf->sgt->nents;
-
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_table;
-	}
-
-	sgl = buf->sgt->sgl;
-	while (i < npages) {
-		buf->pages[i] = sg_page(sgl);
-		sgl = sg_next(sgl);
-		i++;
-	}
-
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->kvaddr,
 			(unsigned long)buf->dma_addr,
@@ -96,8 +78,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 
 	return ret;
 
-err_free_table:
-	sg_free_table(buf->sgt);
 err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -87,8 +87,7 @@ static struct sg_table *
 		goto err_unlock;
 	}
 
-	DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
-			buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -99,34 +99,23 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 	unsigned long pfn;
 	int i;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		if (!buf->sgt)
-			return -EINTR;
-
-		sgl = buf->sgt->sgl;
-		for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-			if (!sgl) {
-				DRM_ERROR("invalid SG table\n");
-				return -EINTR;
-			}
-			if (page_offset < (sgl->length >> PAGE_SHIFT))
-				break;
-			page_offset -= (sgl->length >> PAGE_SHIFT);
-		}
-
-		if (i >= buf->sgt->nents) {
-			DRM_ERROR("invalid page offset\n");
-			return -EINVAL;
-		}
+	if (!buf->sgt)
+		return -EINTR;
 
-		pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
-	} else {
-		if (!buf->pages)
-			return -EINTR;
+	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		return -EINVAL;
+	}
 
-		pfn = page_to_pfn(buf->pages[0]) + page_offset;
-	}
+	sgl = buf->sgt->sgl;
+	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+		if (page_offset < (sgl->length >> PAGE_SHIFT))
+			break;
+		page_offset -= (sgl->length >> PAGE_SHIFT);
+	}
 
+	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
 
 	return vm_insert_mixed(vma, f_vaddr, pfn);
 }
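For reference, the rewritten fault path above performs one up-front bound check against the total buffer size and then walks the scatterlist to turn a page offset into a pfn. The following standalone sketch uses hypothetical simplified types rather than the kernel's scatterlist to show the same walk; a CONTIG buffer is simply the one-segment case, which is why a single code path now serves both buffer types.

#include <stddef.h>

/* Simplified stand-in for one scatterlist entry (illustration only). */
struct seg {
	unsigned long start_pfn;	/* first page frame of the segment */
	unsigned long npages;		/* segment length in pages */
};

/*
 * Walk the segments until the remaining offset falls inside one of
 * them. The caller is assumed to have already checked the offset
 * against the total buffer size, so the loop cannot run off the end
 * for a valid offset. With nsegs == 1 (a CONTIG buffer) the loop
 * body runs exactly once.
 */
static unsigned long offset_to_pfn(const struct seg *segs, size_t nsegs,
				   unsigned long page_offset)
{
	size_t i;

	for (i = 0; i < nsegs; i++) {
		if (page_offset < segs[i].npages)
			return segs[i].start_pfn + page_offset;
		page_offset -= segs[i].npages;
	}
	return 0;	/* unreachable if the caller validated the offset */
}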
drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -41,8 +41,6 @@
  *	device address with IOMMU.
  * @write: whether pages will be written to by the caller.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
  * @pfnmap: indicate whether memory region from userptr is mmaped with
  *	VM_PFNMAP or not.
@@ -54,8 +52,6 @@ struct exynos_drm_gem_buf {
 	struct dma_attrs dma_attrs;
 	unsigned int write;
 	struct sg_table *sgt;
-	struct page **pages;
-	unsigned long page_size;
 	unsigned long size;
 	bool pfnmap;
 };