// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
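
/*
 * Illustrative sketch only (not driver code): a userspace client maps a BO
 * by looking up its fake mmap offset and calling mmap() on the DRM fd, at
 * which point msm_gem_mmap()/msm_gem_mmap_obj() above apply the WC,
 * uncached or cached attributes chosen at allocation time. Assuming a
 * hypothetical 'fd' and 'offset' already obtained from the driver:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, offset);
 *	if (map == MAP_FAILED)
 *		err(1, "mmap");
 */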

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
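
/*
 * Usage sketch (illustrative only): a caller that needs a GPU address pins
 * the iova for the duration of the access and drops the pin afterwards.
 * 'aspace' stands in for whichever address space the caller targets:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program 'iova' into the hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */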

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

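/*
 * Usage sketch (illustrative only): CPU access through the kernel vmap is
 * bracketed by get/put so the shrinker can tell when the mapping may be
 * torn down again:
 *
 *	uint32_t *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	ptr[0] = 0xdeadbeef;
 *	msm_gem_put_vaddr(obj);
 */
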
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
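
/*
 * Usage sketch (illustrative only, assumes dev->struct_mutex is held as the
 * WARN_ON above expects): userspace marks idle buffers purgeable and revives
 * them before reuse; a zero return on the WILLNEED transition means the
 * backing pages were purged and the contents are gone:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		reinit_bo_contents(obj);	(hypothetical helper)
 */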

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
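
/*
 * Context sketch (not driver code): msm_gem_purge() is meant to be driven
 * from shrinker/OOM paths for objects whose madvise state makes
 * is_purgeable() true, roughly along these lines:
 *
 *	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 *		if (is_purgeable(msm_obj))
 *			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 *	}
 *
 * The real scan loop (with locking and accounting) lives in
 * msm_gem_shrinker.c.
 */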

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
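
/*
 * Usage sketch (illustrative only): the submit path moves objects onto the
 * GPU's active list together with the fence that will signal on retire, and
 * the retire path moves them back. Assuming a submit's out-fence in 'fence':
 *
 *	msm_gem_move_to_active(obj, gpu, true, fence);
 *	...
 *	(later, when the fence has signaled)
 *	msm_gem_move_to_inactive(obj);
 */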

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}
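
/*
 * Context sketch (not driver code): msm_gem_import() is reached through the
 * PRIME import path, where the DRM core hands the driver a dma-buf's
 * sg_table, roughly:
 *
 *	dma-buf fd -> drm_gem_prime_fd_to_handle()
 *		   -> drm_gem_prime_import()
 *		   -> driver->gem_prime_import_sg_table() == msm_gem_import()
 *
 * The backing pages remain owned by the exporter, which is why
 * free_object() above only frees the page array for imported objects.
 */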

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
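
/*
 * Usage sketch (illustrative only): the kernel_new/kernel_put helpers are
 * shorthand for "allocate, pin an iova and map into the kernel", e.g. for a
 * ringbuffer-style GPU-visible allocation. 'aspace' is whichever address
 * space the caller uses:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */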

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}