// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

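/*
 * Get (and, on first use, create) the backing pages and sg_table for a BO.
 * Must be called with etnaviv_obj->lock held, following the pattern used
 * elsewhere in this file (illustrative sketch only):
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */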
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

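/*
 * Look up or create the mapping of a BO into the given MMU context.  On
 * success the mapping use count is raised and an extra reference is taken
 * on the GEM object; both are dropped again by
 * etnaviv_gem_mapping_unreference() above.  Illustrative caller pattern
 * (a sketch, not lifted from an actual submit path):
 *
 *	mapping = etnaviv_gem_mapping_get(obj, mmu_context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... use the GPU address held by the mapping ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */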
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

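/*
 * Return a kernel virtual mapping of the BO, creating it lazily on first
 * use and caching it in etnaviv_obj->vaddr thereafter.
 */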
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

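/*
 * Prepare a BO for CPU access: wait for any outstanding GPU work (or bail
 * out with -EBUSY if ETNA_PREP_NOSYNC is set) and, for cached BOs, sync
 * the backing pages for the CPU.  Pairs with etnaviv_gem_cpu_fini() below.
 * Illustrative usage sketch (not taken from a real caller):
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	... CPU writes to the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 */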
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
435
	struct drm_etnaviv_timespec *timeout)
436 437 438 439 440 441 442
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

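/*
 * Called when the last reference to the BO is dropped: remove it from the
 * global GEM list, tear down any remaining VRAM mappings and release the
 * backing store through the per-type release hook.
 */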
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

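/*
 * Wrap a range of user memory as a cached BO.  The pages are pinned with
 * pin_user_pages_fast() on first use and unpinned on release; mmap of a
 * userptr BO is rejected by etnaviv_gem_userptr_mmap_obj() above.
 */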
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}