/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

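/*
 * Final GEM destructor: tear down any PRIME import attachment, release
 * the GEM core state, then drop the TTM buffer object reference that the
 * GEM object held.
 */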
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

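/*
 * Called each time a client (drm_file) opens a handle to this object.
 * On chips with a per-client VM, look up (or create) the VMA mapping the
 * buffer into that client's address space and take a reference on it.
 */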
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

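/*
 * VMA teardown helpers: nouveau_gem_object_unmap() either frees the VMA
 * immediately, or, if the buffer is still fenced, schedules
 * nouveau_gem_object_delete() to run once the fence signals.
 */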
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
}

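/*
 * Called when a client drops its handle: release the VMA reference taken
 * in nouveau_gem_object_open(), unmapping once the last reference goes.
 */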
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

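/*
 * Allocate a buffer object with an embedded GEM object; the caller gets
 * a single GEM reference rather than a raw nouveau_bo/TTM reference.
 */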
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

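/*
 * Fill in the userspace-visible description of a buffer: current domain,
 * GPU offset (the per-client virtual address when a VM is in use), size,
 * mmap handle and tiling state.
 */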
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

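/*
 * Translate the domains requested by userspace into TTM placement flags,
 * preferring the buffer's current location where allowed so validation
 * does not migrate it needlessly.
 */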
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

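/* Per-submission state: buffers reserved under one ww_acquire ticket. */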
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

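/*
 * Unwind the validation list: attach the submission fence to each buffer
 * (when one exists), drop any kmap left behind by relocations, and
 * unreserve.
 */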
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

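/*
 * Reserve every buffer in the pushbuf list.  Reservations go through a
 * ww_acquire ticket so that a deadlock against a concurrent submission
 * backs off via ttm_bo_reserve_slowpath() and retries; buffers are
 * bucketed by their valid domains before being spliced onto op->list.
 */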
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

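/*
 * For each reserved buffer: apply the requested domains, validate its
 * placement, and synchronize against outstanding fences.  On pre-Tesla
 * chips, write refreshed "presumed" offsets back to userspace; returns
 * the number of buffers whose presumed state was stale (i.e. how many
 * relocations must be applied).
 */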
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

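/*
 * Reserve and validate all buffers referenced by a pushbuf; the number
 * of stale relocations is returned through *apply_relocs.
 */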
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

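/*
 * Copy a userspace array into kernel memory, falling back to vmalloc()
 * when the allocation is too large for kmalloc().  Pair with u_free().
 */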
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

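/*
 * Patch relocations into buffer contents: for each reloc entry, compute
 * the low/high half of the presumed offset (optionally OR-ing in
 * domain-dependent bits), wait for the target buffer to idle, then write
 * the value through a CPU mapping.
 */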
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

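/*
 * DRM_NOUVEAU_GEM_PUSHBUF: copy in the push/buffer/reloc arrays, validate
 * the buffer list, apply relocations if any presumed offsets were stale,
 * submit via the channel's indirect buffer, call, or jump method
 * (depending on chipset), then fence the submission.
 */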
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

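/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait for GPU access to the buffer to finish
 * before CPU access (up to 30 seconds, or a non-blocking check with the
 * NOWAIT flag).
 */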
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait) {
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	} else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}