/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

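/*
 * GEM object destructor: drop any pin still held on the buffer, tear
 * down the PRIME attachment if one exists, then release the TTM buffer
 * object and the GEM object itself.
 */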
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	/* Lockdep hates you for doing reserve with gem object lock held */
	if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

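/*
 * Called when a client opens a handle to this object.  On chips with a
 * per-client VM, make sure the buffer has a mapping in that VM, either
 * allocating a new nouveau_vma or taking a reference on an existing one.
 */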
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

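/*
 * Helpers for tearing down a per-VM mapping.  If the buffer is still
 * placed in VRAM/GART and carries a fence, the unmap is deferred to
 * fence completion via nouveau_fence_work(); otherwise the mapping is
 * torn down immediately.
 */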
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		if (nvbo->bo.sync_obj)
			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

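/*
 * Called when a client closes its handle: drop the client's reference
 * on the per-VM mapping and unmap it once the last reference is gone.
 */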
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

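/*
 * Allocate a new buffer object and wrap it in a GEM object.  The
 * requested domains are translated into TTM placement flags, and on
 * NV50+ the set of domains the buffer may later be validated into is
 * restricted to those requested here.
 */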
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

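/*
 * Fill in the userspace info structure for a buffer: current domain,
 * GPU offset (the per-client VMA offset when a VM is in use), size,
 * mmap handle and tiling parameters.
 */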
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

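/*
 * GEM_NEW ioctl: validate the requested tile flags, create the buffer,
 * create a handle for the caller and report the buffer parameters back.
 * The allocation reference is dropped once the handle owns the object.
 */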
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

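/*
 * Work out TTM placement flags for a pushbuf buffer from the domains
 * userspace asked for, preferring whichever valid domain the buffer
 * already resides in to avoid needless migration.
 */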
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

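/*
 * Per-submission bookkeeping: buffers reserved for a pushbuf are kept
 * on one of three lists according to the domains they may be placed
 * in, together with the ww_mutex acquire context used to reserve them.
 */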
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

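/*
 * Release everything validate_init() reserved: fence each buffer with
 * the pushbuf's fence (if any), drop any kmap set up for relocations,
 * unreserve the buffer and drop the list's GEM reference.
 */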
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

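/*
 * Look up and reserve every buffer on the pushbuf's buffer list using a
 * single ww_acquire ticket.  On contention (-EDEADLK) the buffers
 * reserved so far are dropped, the contended buffer is reserved via the
 * slowpath, and reservation restarts from the top.
 */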
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

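/*
 * Make the channel wait for any fence still attached to the buffer
 * before this submission uses it.
 */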
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

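/*
 * Validate each reserved buffer into an allowed placement and sync with
 * its previous user.  On pre-NV50 chips, refresh the presumed offset of
 * any buffer that moved and report it back to userspace; the return
 * value is the number of buffers whose presumed state changed.
 */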
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

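/*
 * Reserve and validate the complete buffer list for a pushbuf
 * submission, accumulating the number of entries that need their
 * relocations applied.
 */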
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

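/*
 * Copy a userspace array (nmemb elements of the given size) into a
 * freshly allocated kernel buffer.
 */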
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

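/*
 * Patch relocations into the buffers on the validate list: each entry
 * writes the (possibly shifted and OR'd) presumed GPU address of one
 * buffer into another buffer at the given offset, waiting for the
 * target to go idle before writing through a CPU mapping.
 */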
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

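/*
 * PUSHBUF ioctl: copy in the push and buffer lists, validate all
 * buffers, apply relocations when needed, then submit the pushes to the
 * channel using whichever method the chipset supports (IB ring, call,
 * or jump) and fence the submission.
 */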
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

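/*
 * CPU_PREP ioctl: wait (or poll, with NOWAIT) for the buffer to become
 * idle before the CPU accesses it.
 */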
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

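/* CPU_FINI ioctl: nothing to do here. */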
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

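/*
 * INFO ioctl: look up the buffer by handle and report its current
 * parameters to userspace.
 */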
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}