/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

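/*
 * Lazily set up the backing pages for an object: shmem pages for the
 * normal (IOMMU) case, or a VRAM carveout allocation otherwise.  The
 * result is cached in msm_obj->pages, and an sg_table is built for it.
 * For WC/uncached buffers the new pages are also flushed out of the CPU
 * cache, since the GPU and display controller are not coherent.
 */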
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

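/*
 * Apply the object's caching mode to a userspace mapping: WC and
 * uncached buffers get the matching pgprot, while cached buffers are
 * redirected to the GEM object's own shmem file so that
 * unmap_mapping_range() behaves as expected (including for mmap'd
 * dmabufs).
 */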
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

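/*
 * Per-address-space VMA bookkeeping: each object keeps a list of the
 * iova ranges (one per aspace) it has been mapped into.  add_vma() and
 * lookup_vma() must be called with msm_obj->lock held.
 */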
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* Get an iova but don't pin the memory behind it */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}


/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

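/*
 * Map the object into the kernel (if it isn't already) and return the
 * vaddr, bumping vmap_count.  'madv' is the maximum madvise state the
 * caller is prepared to deal with, so e.g. purgeable objects are
 * normally rejected with -EBUSY.
 */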
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

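/*
 * Release everything backing a purgeable object (iova mappings, kernel
 * vmap, pages and the shmem backing store) so the memory is returned to
 * the system.  The caller must hold dev->struct_mutex.
 */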
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu%9s\n", obj->size, madv);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "   vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s]", vma->aspace->name,
				vma->iova, vma->mapped ? "mapped" : "unmapped");

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
893
	drm_gem_object_put_unlocked(obj);
894 895 896 897

	return ret;
}

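/*
 * Common allocation path shared by msm_gem_new(), msm_gem_new_locked()
 * and msm_gem_import(): validate the cache flags, allocate and
 * initialize the msm_gem_object, and put it on the inactive list.
 */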
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
1095
	if (IS_ERR(vaddr)) {
1096
		msm_gem_put_iova(obj, aspace);
1097 1098
		ret = PTR_ERR(vaddr);
		goto err;
1099 1100 1101 1102 1103 1104
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);

}

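/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): allocate a kernel-mapped, GPU-visible buffer and release it
 * again when done.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... fill vaddr, hand iova to the GPU ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */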
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}