// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

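/* Drop the object's scatter/gather table and release its backing pages. */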
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

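/*
 * Get the object's backing pages, allocating them and building the DMA
 * scatter/gather table on first use.  Must be called with the object
 * lock held.
 */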
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

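/*
 * Set up the page protection of a userspace mapping according to the
 * object's caching mode (write-combined, uncached or cached).
 */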
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
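	/*
	 * An interrupted lock attempt returns NOPAGE, so the faulting
	 * access is simply retried once any pending signal is handled.
	 */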
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

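/*
 * Drop a mapping use count taken by etnaviv_gem_mapping_get() together
 * with the GEM object reference backing it.
 */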
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

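/*
 * Look up or create the mapping of the object in the given MMU context.
 * On success the mapping is returned with its use count raised and an
 * additional reference held on the GEM object.
 */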
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

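/*
 * Default ->vmap implementation: map the backing pages into kernel
 * address space with write-combined attributes.
 */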
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

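/* Translate the ETNA_PREP_* access flags into a DMA sync direction. */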
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

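/*
 * Prepare the object for CPU access: unless ETNA_PREP_NOSYNC is set, wait
 * for pending GPU access to finish, then sync cached objects back into the
 * CPU domain.
 */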
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

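/* Finish CPU access: flush cached objects back out to the device. */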
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

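/*
 * Track the object on the device-wide GEM list, which is used among
 * others by the debugfs object dump.
 */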
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.vm_ops = &vm_ops,
};

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

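/*
 * Pin all pages of the userptr region with pin_user_pages_fast(); the
 * pages stay pinned until the object is released.
 */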
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
					  pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}