// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

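/* Physical address of the object's backing store in the VRAM carveout (no-IOMMU case): */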
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

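/* True when the object is backed by shmem pages rather than the VRAM carveout: */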
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

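/* Lazily allocate the backing pages and build the sg_table on first use: */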
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

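/* Release the VRAM carveout allocation and the page array built by get_pages_vram(): */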
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

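/* Grab the backing pages under the object lock, refusing objects that are not MSM_MADV_WILLNEED: */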
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

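/* Set up the vma flags and page protection to match the object's caching mode: */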
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

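/* Fault handler: look up the backing page for the faulting address and insert its pfn into the vma: */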
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

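/* Allocate and track a new per-address-space mapping (vma) for this object: */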
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

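/* Look up (or create and initialize) the vma for @aspace and return its iova, without pinning: */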
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

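/* Pin the backing pages and map them into the address space with the appropriate protection bits: */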
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

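/* vmap() the backing pages into the kernel address space, bumping vmap_count: */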
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

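/* Reclaim the backing memory of a purgeable (MSM_MADV_DONTNEED) object: */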
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

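/* Track GPU activity: move the object to the gpu's active list on the first get, back to the inactive list on the last put: */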
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();

	if (!atomic_dec_return(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
	}
}

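/* Wait for any fences on the reservation object before CPU access (MSM_PREP_NOSYNC just polls): */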
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

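/* Allocate and initialize the msm_gem_object itself (no backing storage yet): */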
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

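/* Allocate a new GEM object, backed by shmem, or by the VRAM carveout when there is no IOMMU (or for stolen/scanout memory): */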
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

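/* Wrap an imported dma-buf's sg_table in a GEM object; importing requires an IOMMU: */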
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

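/* Convenience helper for kernel-internal buffers: allocate, pin into @aspace, and map into the kernel: */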
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}