/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

/*
 * Translate an errno-style result from the fault path into the
 * VM_FAULT_* code expected by the core VM.
 */
static unsigned int convert_to_vm_err_msg(int msg)
{
	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;

	default:
		return VM_FAULT_SIGBUS;
	}
}

58
static int check_gem_flags(unsigned int flags)
59
{
60 61 62 63 64 65 66 67
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cachable as default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

84 85
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
86
	/* TODO */
87

88
	return roundup(size, PAGE_SIZE);
89 90
}

91
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
92 93 94 95 96 97 98 99 100 101 102 103
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

104 105 106
		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
107 108 109 110

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

/*
 * Register @obj in the file's handle table and return the new handle
 * through @handle.  On success the allocation-time reference is
 * dropped because the handle now owns the object.
 */
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret = drm_gem_handle_create(file_priv, obj, handle);

	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
136
	struct exynos_drm_gem_buf *buf;
137 138 139 140

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
141
	buf = exynos_gem_obj->buffer;
142 143

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
144

145 146 147 148 149 150 151 152 153
	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

154
	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
155

156
out:
157
	exynos_drm_fini_buf(obj->dev, buf);
158
	exynos_gem_obj->buffer = NULL;
159 160 161 162 163

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
164 165 166
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
167
	exynos_gem_obj = NULL;
168 169
}

I
Inki Dae 已提交
170
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
171 172 173 174 175 176 177 178 179 180 181 182
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

183
	exynos_gem_obj->size = size;
184 185 186 187 188 189 190 191 192 193 194 195
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
196 197
}

198
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
199 200
						unsigned int flags,
						unsigned long size)
201
{
202
	struct exynos_drm_gem_obj *exynos_gem_obj;
203 204
	struct exynos_drm_gem_buf *buf;
	int ret;
205

206 207 208 209 210 211 212
	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);
213

214 215 216
	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);
217 218 219

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
J
Joonyoung Shim 已提交
220
		return ERR_PTR(-ENOMEM);
221

222 223
	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
224
		ret = -ENOMEM;
225
		goto err_fini_buf;
226 227
	}

228 229 230 231 232
	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

233 234 235 236
	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		goto err_fini_buf;
237
	}
238 239

	return exynos_gem_obj;
240 241

err_fini_buf:
242 243
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
244 245
}

246
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
J
Joonyoung Shim 已提交
247
				struct drm_file *file_priv)
248 249
{
	struct drm_exynos_gem_create *args = data;
J
Joonyoung Shim 已提交
250
	struct exynos_drm_gem_obj *exynos_gem_obj;
251
	int ret;
252

253
	DRM_DEBUG_KMS("%s\n", __FILE__);
254

255
	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
256 257 258
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

259 260 261 262 263 264 265
	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

266 267 268
	return 0;
}

I
Inki Dae 已提交
269
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
270
					unsigned int gem_handle,
I
Inki Dae 已提交
271
					struct drm_file *filp)
272 273 274 275
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

I
Inki Dae 已提交
276
	obj = drm_gem_object_lookup(dev, filp, gem_handle);
277 278 279 280 281 282 283 284 285 286 287 288
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

/*
 * Counterpart of exynos_drm_gem_get_dma_addr(): drops both the
 * reference taken by the lookup below and the one still held from the
 * earlier get call.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because it was already
	 * increased at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

311
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
J
Joonyoung Shim 已提交
312
				    struct drm_file *file_priv)
313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
J
Joonyoung Shim 已提交
331
				      struct vm_area_struct *vma)
332 333 334
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
I
Inki Dae 已提交
335
	struct exynos_drm_gem_buf *buffer;
336
	unsigned long vm_size;
337 338 339

	DRM_DEBUG_KMS("%s\n", __FILE__);

340
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
341

342
	update_vm_cache_attr(exynos_gem_obj, vma);
343

344
	vm_size = vma->vm_end - vma->vm_start;
345

346
	/*
I
Inki Dae 已提交
347
	 * a buffer contains information to physically continuous memory
348 349
	 * allocated by user request or at framebuffer creation.
	 */
I
Inki Dae 已提交
350
	buffer = exynos_gem_obj->buffer;
351 352

	/* check if user-requested size is valid. */
I
Inki Dae 已提交
353
	if (vm_size > buffer->size)
354 355
		return -EINVAL;

356 357 358
	return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
359 360 361 362 363 364 365
}

/*
 * File operations installed on a gem object's backing file for the
 * direct-mapping path; see exynos_drm_gem_mmap_ioctl().
 */
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
J
Joonyoung Shim 已提交
366
			      struct drm_file *file_priv)
367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned int addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

388
	addr = vm_mmap(obj->filp, 0, args->size,
389 390 391 392 393 394 395 396 397 398 399 400 401 402
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * gem_init_object callback: no per-object setup is needed here, but
 * the gem core requires the hook to exist.
 */
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

J
Joonyoung Shim 已提交
436
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
437
{
I
Inki Dae 已提交
438 439 440
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

441 442
	DRM_DEBUG_KMS("%s\n", __FILE__);

I
Inki Dae 已提交
443 444 445 446 447 448
	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

449
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
450 451 452
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
J
Joonyoung Shim 已提交
453 454
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
455 456
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
457
	int ret;
458 459 460 461 462 463 464 465 466

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * alocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

467
	args->pitch = args->width * ((args->bpp + 7) / 8);
I
Inki Dae 已提交
468
	args->size = args->pitch * args->height;
469

470
	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
471 472 473
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

474 475 476 477 478 479 480
	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

481 482 483 484
	return 0;
}

/*
 * drm dumb_map_offset callback: look up the object behind @handle,
 * lazily create its fake mmap offset if needed and report it through
 * @offset.  Returns 0 on success or a negative errno.
 */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	/* create the mmap offset only on first request. */
	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	/* drop the reference taken by the lookup above. */
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * drm dumb_destroy callback: delete the handle; when both
 * obj->refcount and obj->handle_count reach zero the gem core calls
 * exynos_drm_gem_free_object() to release the resources.
 */
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

546 547 548 549
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
550
	unsigned long f_vaddr;
551 552 553 554 555
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
556
	f_vaddr = (unsigned long)vmf->virtual_address;
557 558 559

	mutex_lock(&dev->struct_mutex);

560
	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
561
	if (ret < 0)
562
		DRM_ERROR("failed to map a buffer with user.\n");
563 564 565 566 567 568 569 570

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
571 572
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
573 574 575 576 577 578 579 580 581 582 583
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

584 585 586 587 588 589 590 591 592 593
	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

594 595 596
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

597 598
	update_vm_cache_attr(exynos_gem_obj, vma);

599 600
	return ret;
}