/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


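/* Physical address of a VRAM-carveout backed object: the drm_mm node holds
 * a page offset into the carveout, relative to priv->vram.paddr.
 */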
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

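/* true if the object is shmem backed rather than carved out of VRAM */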
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

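/* Lazily attach backing pages (shmem or VRAM carveout) and build the sg
 * table.  For WC/uncached buffers, the new pages are cleaned from the CPU
 * cache since the GPU and display controller are not cache coherent.
 */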
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

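/* Return a VRAM-carveout allocation to the drm_mm pool and free the page
 * array.
 */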
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

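/* Release the sg table and backing pages set up by get_pages() */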
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

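/* Set up a userspace mapping: pick the page protection that matches the
 * object's caching flags and, for cached objects, redirect the vma to the
 * shmem file.
 */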
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

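/* Page-fault handler for userspace mmaps: pin the backing pages and insert
 * the pfn for the faulting page into the vma.
 */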
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

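/* Mappings are tracked per address space as a list of msm_gem_vma's hanging
 * off the object.  These helpers expect msm_obj->lock to be held.
 */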
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

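/*
 * Illustrative calling pattern only (real callers live elsewhere in the
 * driver, e.g. the submit path):
 *
 *    uint64_t iova;
 *    int ret = msm_gem_get_iova(obj, aspace, &iova);
 *    if (ret)
 *            return ret;
 *    ... hand 'iova' to the hardware ...
 *    msm_gem_put_iova(obj, aspace);
 */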
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

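/* Map the object into the kernel address space (attaching pages as needed)
 * and return the vaddr; balanced by msm_gem_put_vaddr().  Illustrative use
 * only (not a caller from this file):
 *
 *    void *ptr = msm_gem_get_vaddr(obj);
 *    if (IS_ERR(ptr))
 *            return PTR_ERR(ptr);
 *    memcpy(ptr, cmds, size);
 *    msm_gem_put_vaddr(obj);
 */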
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

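/* Drop a purgeable object's iova mappings, kernel mapping and backing pages,
 * and truncate the shmem file so the memory is returned to the system
 * immediately.  Called with struct_mutex held (e.g. from the shrinker/OOM
 * path).
 */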
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

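/* Allocate a GEM object, backed either by shmem pages or, when there is no
 * IOMMU (or for MSM_BO_STOLEN buffers), by the VRAM carveout.
 */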
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		vma = add_vma(obj, NULL);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}