// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

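/* Physical address of a VRAM-carveout backed object: the drm_mm node holds
 * the page offset within the carveout, on top of the carveout base address.
 */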
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

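/* Objects without a VRAM carveout node are backed by shmem pages. */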
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

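/* Lazily allocate the backing pages (shmem or VRAM carveout) and build the
 * scatter/gather table the first time the pages are needed.
 */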
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

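/* Release a VRAM carveout allocation along with the page array built for it. */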
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

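/* Drop the backing pages and scatter/gather table, if they were allocated. */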
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

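/* Get the backing pages for CPU/GPU access; fails with -EBUSY once the
 * object is no longer marked MSM_MADV_WILLNEED.
 */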
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

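/* Set up vm_flags and page protection for a userspace mapping according to
 * the buffer's caching flags (WC, uncached, or cached via the shmem file).
 */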
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

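/* Page-fault handler for userspace mmaps: make sure the backing pages are
 * attached and insert the faulting page's pfn into the vma.
 */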
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

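/* Allocate a new vma for the given address space and add it to the object's
 * vma list; the caller must hold the object lock.
 */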
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

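/* Find the object's existing vma for the given address space, if any. */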
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

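/* Look up (or create and initialize) the object's vma in the given address
 * space and return its iova; the caller must hold the object lock.
 */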
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

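/* Map the object's pages into its vma for this address space, with IOMMU
 * protection bits derived from the buffer flags.
 */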
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get the iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	msm_gem_lock(obj);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	msm_gem_unlock(obj);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

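/* Map the object into the kernel on first use (writecombine), bumping
 * vmap_count so the shrinker won't tear down the mapping while it is in use.
 */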
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	msm_gem_unlock(obj);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unlock(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

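/* Drop a purgeable object's backing store entirely: release its iovas,
 * kernel mapping and pages, and truncate the shmem file so the memory is
 * returned to the system.
 */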
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	msm_gem_unlock(obj);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	msm_gem_unlock(obj);
}

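/* Wait for in-flight work from other fence contexts tracked in the object's
 * reservation object; shared fences are only waited on for exclusive access.
 */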
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

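/* Move the object onto the GPU's active list while it has in-flight submits;
 * msm_gem_active_put() moves it back to the inactive list when the active
 * count drops to zero.
 */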
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!atomic_dec_return(&msm_obj->active_count)) {
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}
}

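/* Prepare for CPU access: wait on the buffer's reservation object (all
 * fences when writing, else just the exclusive fence); MSM_PREP_NOSYNC makes
 * this a non-blocking busy check.
 */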
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

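/* Object teardown is deferred to a workqueue: msm_gem_free_object() queues
 * the object and msm_gem_free_work() calls free_object() under struct_mutex.
 */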
/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	msm_gem_lock(obj);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	msm_gem_unlock(obj);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

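/* Common allocation path: validate the caching flags and set up the
 * msm_gem_object bookkeeping shared by the shmem and carveout cases.
 */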
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

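/* Allocate a GEM object, backed by shmem pages or, when there is no IOMMU
 * (or for stolen/scanout buffers with a carveout configured), by VRAM.
 */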
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

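/* Wrap an imported dma-buf's sg_table in a GEM object; the pages themselves
 * remain owned by the exporter.
 */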
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
1231 1232 1233 1234 1235 1236 1237 1238

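/* Undo msm_gem_kernel_new(): drop the kernel mapping, unpin the iova and
 * release the reference.
 */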
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

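/* Set a human-readable debug name for the object, shown in debugfs output. */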
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}