// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}
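
/*
 * How these pair up in this file (sketch): for WC/uncached BOs,
 *
 *	get_pages()  calls  sync_for_device()  after allocating the pages
 *	put_pages()  calls  sync_for_cpu()     before freeing them
 *
 * so the CPU cache is clean before the pages are mapped uncached.
 */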

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
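
/*
 * Illustrative usage, a sketch rather than code from this driver:
 * callers that need the backing pages pair the two helpers above:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access the pages ...
 *	msm_gem_put_pages(obj);
 */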

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
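
/*
 * Illustrative usage, a sketch rather than code from this driver:
 * pin a BO into a GPU address space, then drop the pin when the GPU
 * is done with the buffer:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU ...
 *	msm_gem_unpin_iova(obj, aspace);
 */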

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}
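
/*
 * Illustrative usage, a sketch rather than code from this driver:
 * map a BO into the kernel for CPU access and balance it with a put:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... CPU access through ptr ...
 *	msm_gem_put_vaddr(obj);
 */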

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status; returns true if the object has not been
 * purged, else false (or -errno on error).
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
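
/*
 * Illustrative caller, a sketch of how an ioctl path might consume the
 * return value ('retained' is a hypothetical variable name):
 *
 *	ret = msm_gem_madvise(obj, madv);
 *	if (ret >= 0) {
 *		retained = ret;	(false if the object was already purged)
 *		ret = 0;
 *	}
 */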

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
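
/*
 * Illustrative usage, a sketch rather than code from this driver:
 * bracket CPU access with prep/fini, here assuming MSM_PREP_READ from
 * the UAPI flags:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads the buffer ...
 *	msm_gem_cpu_fini(obj);
 */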

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
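
/*
 * Illustrative usage, a sketch rather than code from this driver: a
 * kernel-internal buffer allocated and torn down with the helpers above:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *			&bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr and iova ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */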

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}