/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

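/* Lazily attach backing pages on first use.  Pages come either from
 * shmem (drm_gem_get_pages()) or from the VRAM carveout, and for
 * non-cached buffers are DMA-mapped here.  Caller is expected to hold
 * msm_obj->lock.
 */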
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the pages are clean
		 * before release, because display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

		if (msm_obj->sgt)
			sg_free_table(msm_obj->sgt);

		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

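/* Set up vma protection bits according to the buffer's cache mode.
 * Userspace typically gets here via mmap() on the drm fd with the fake
 * offset obtained from msm_gem_mmap_offset(); roughly, for a dumb
 * buffer (a sketch, 'req'/'size' are illustrative):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 */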
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

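/* Page fault handler for userspace mappings: pin the backing pages if
 * not already attached, and insert the pfn for the faulting address.
 * Serialized against shrinker purge/vunmap by msm_obj->lock.
 */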
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

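/* A bo can be mapped into multiple address spaces (gpu, display, etc);
 * each mapping is tracked as a msm_gem_vma on msm_obj->vmas.  These
 * helpers expect the caller to hold msm_obj->lock.
 */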
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
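/* A sketch of typical usage ('gpu' is illustrative; error handling
 * elided):
 *
 *	uint64_t iova;
 *	ret = msm_gem_get_iova(obj, gpu->aspace, &iova);
 *	if (!ret)
 *		... hand 'iova' to the GPU/display ...
 */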
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

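/* Return (creating on first use) a kernel virtual mapping of the
 * object.  'madv' is the most-purgeable madvise state the caller will
 * tolerate: normal callers pass MSM_MADV_WILLNEED, the hang-dump path
 * passes __MSM_MADV_PURGED to accept any state.
 */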
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

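/* Drop a vmap_count reference taken by msm_gem_get_vaddr(); the actual
 * vunmap() is deferred until the shrinker (msm_gem_vunmap()) or final
 * free needs it.
 */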
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

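/* Reclaim the backing store of a purgeable (MSM_MADV_DONTNEED) object;
 * called from the shrinker with dev->struct_mutex held.
 */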
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
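/* If 'exclusive' (ie. the caller will write the bo), wait on all
 * shared fences as well; otherwise waiting on the exclusive fence
 * from another context is sufficient.
 */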
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

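/* Called with struct_mutex held when a submit referencing the bo is
 * queued: remember which gpu the bo is active on and attach the
 * submit's fence to the reservation object.
 */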
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

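/* Common tail of bo creation: allocate the msm_gem_object, validate
 * cache flags, set up the reservation object and put the bo on the
 * inactive list.  The choice of backing store (shmem vs vram) is left
 * to the callers.
 */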
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}
	*obj = &msm_obj->base;

	return 0;
}

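/* When there is no IOMMU (or for MSM_BO_STOLEN), the bo is backed by
 * the VRAM carveout and its "iova" is simply the physical address;
 * otherwise it is a shmem-backed object.
 */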
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

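/* Import a dma-buf: the exporter owns the backing pages, so we only
 * keep the sg_table and a page array (for mmap/vmap) here.
 */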
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

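/* Convenience wrapper for kernel-internal buffers (ringbuffers,
 * microcode, etc): allocate a bo, optionally pin it into 'aspace', and
 * return its kernel mapping.  E.g. roughly (a sketch, names
 * illustrative):
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, gpu->aspace,
 *			&bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */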
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_put(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}