// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

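/*
 * Tear down the scatter/gather table (including its DMA mapping) and
 * release the backing pages obtained via drm_gem_get_pages().
 */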
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

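/*
 * Get the pages backing this object, building the scatter/gather table
 * and performing the initial cache maintenance on first use.
 * Must be called with etnaviv_obj->lock held.
 */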
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

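/*
 * Set up the userspace mapping according to the object's caching mode:
 * write-combined, uncached, or cached through the shmem file's own
 * address_space.
 */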
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

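/*
 * Find or create the mapping of this object in the given MMU context,
 * taking a use count on the mapping and a reference on the GEM object.
 */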
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

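/*
 * ->vmap implementation shared by the shmem and userptr object types:
 * map the backing pages write-combined into the kernel address space.
 */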
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

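/* Translate the ETNA_PREP_* access flags into a DMA data direction. */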
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

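/*
 * Prepare the object for CPU access: wait for outstanding GPU work
 * (unless ETNA_PREP_NOSYNC is set) and, for cached objects, sync the
 * pages back to the CPU domain.
 */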
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

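/*
 * Release path for shmem backed objects: drop the kernel virtual
 * mapping and the backing pages.
 */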
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

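/*
 * Final free of a GEM object: remove it from the device's object list,
 * tear down any remaining MMU mappings and release the backing storage.
 */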
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

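/*
 * Common object allocation: validate the caching flags and set up the
 * etnaviv specific parts of the object.  Backing storage is set up by
 * the caller.
 */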
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

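/*
 * Pin the user pages backing a userptr object.  Only the process which
 * created the object may populate it, as the pages are looked up in
 * that process' address space.
 */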
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}