/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>

/*
 * Per-attachment private data, stored in dma_buf_attachment.priv:
 * our own copy of the exporter's scatter/gather table plus the DMA
 * direction it is currently mapped in (DMA_NONE until the first
 * map_dma_buf call establishes a mapping).
 */
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
I
Inki Dae 已提交
27
{
28
	struct exynos_drm_dmabuf_attachment *exynos_attach;
I
Inki Dae 已提交
29

30 31 32
	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;
I
Inki Dae 已提交
33

34 35
	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;
I
Inki Dae 已提交
36

37 38
	return 0;
}
/*
 * exynos_gem_detach_dma_buf - dma_buf_ops.detach callback
 *
 * Tears down the DMA mapping (if one was established by
 * exynos_gem_map_dma_buf()) and releases the per-attachment
 * scatter/gather table and state allocated at attach time.
 */
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	/* dir != DMA_NONE means the table was mapped in map_dma_buf */
	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}

static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
64
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
I
Inki Dae 已提交
65 66 67
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
68
	struct scatterlist *rd, *wr;
I
Inki Dae 已提交
69
	struct sg_table *sgt = NULL;
70 71
	unsigned int i;
	int nents, ret;
I
Inki Dae 已提交
72 73 74

	DRM_DEBUG_PRIME("%s\n", __FILE__);

75 76 77 78 79 80 81 82 83 84 85
	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir)
		return &exynos_attach->sgt;

	/* reattaching is not allowed. */
	if (WARN_ON(exynos_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

I
Inki Dae 已提交
86
	buf = gem_obj->buffer;
87 88
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
89 90 91 92 93 94 95 96 97
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
I
Inki Dae 已提交
98 99
	}

100
	mutex_lock(&dev->struct_mutex);
I
Inki Dae 已提交
101

102 103 104 105 106 107 108
	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}
109

110
	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
111 112
	if (!nents) {
		DRM_ERROR("failed to map sgl with iommu.\n");
113
		sgt = ERR_PTR(-EIO);
114 115
		goto err_unlock;
	}
I
Inki Dae 已提交
116

117 118 119
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

120
	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
I
Inki Dae 已提交
121 122 123 124 125 126 127 128 129 130

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

/*
 * exynos_gem_unmap_dma_buf - dma_buf_ops.unmap_dma_buf callback
 *
 * Intentionally empty: the DMA mapping is cached for the lifetime of
 * the attachment and is torn down in exynos_gem_detach_dma_buf().
 */
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

/*
 * exynos_dmabuf_release - dma_buf_ops.release callback
 *
 * Runs once the dma-buf file's f_count drops to zero.  If this GEM
 * object is the one that was exported through drm_prime_handle_to_fd(),
 * clear the export link and drop the reference taken at export time so
 * the backing buffer and resources can be released.
 */
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* not the buffer we exported: nothing to undo */
	if (exynos_gem_obj->base.export_dma_buf != dmabuf)
		return;

	exynos_gem_obj->base.export_dma_buf = NULL;

	/*
	 * drop this gem object refcount to release allocated buffer
	 * and resources.
	 */
	drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
}

/* Atomic CPU mapping of one page of the buffer: not implemented yet. */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */
	return NULL;
}

/* Counterpart of kmap_atomic: nothing to undo while unimplemented. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

/* Non-atomic CPU mapping of one page of the buffer: not implemented yet. */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */
	return NULL;
}

/* Counterpart of kmap: nothing to undo while unimplemented. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

/* Userspace mmap of the dma-buf is not supported by this exporter. */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}

I
static struct dma_buf_ops exynos_dmabuf_ops = {
193 194
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
I
Inki Dae 已提交
195 196 197 198 199 200
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
201
	.mmap			= exynos_gem_dmabuf_mmap,
I
Inki Dae 已提交
202 203 204 205 206 207 208 209 210
	.release		= exynos_dmabuf_release,
};

struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
211
				exynos_gem_obj->base.size, flags);
I
Inki Dae 已提交
212 213 214 215 216 217 218 219 220 221
}

struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
222
	int ret;
I
Inki Dae 已提交
223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
246
	if (IS_ERR_OR_NULL(sgt)) {
I
Inki Dae 已提交
247 248 249 250 251 252 253 254 255 256 257 258 259 260
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
261
		goto err_free_buffer;
I
Inki Dae 已提交
262 263 264 265
	}

	sgl = sgt->sgl;

266 267
	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);
268

269
	if (sgt->nents == 1) {
270 271 272
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
273 274 275 276 277 278
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
279
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
I
Inki Dae 已提交
280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");