/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/dma-buf.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

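/* Final GEM object teardown: drop any leftover pin, tear down a PRIME
 * import attachment if present, then release the TTM buffer object and
 * the GEM object itself.
 */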
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

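/* Per-client GEM open: make sure the buffer has a VMA in the opening
 * client's address space, allocating one on first use and bumping the
 * refcount on subsequent opens.  No-op when the client has no VM.
 */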
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

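/* Per-client GEM close: drop the client's reference on the buffer's VMA
 * and unmap it once the last reference goes away.
 */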
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

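/* Allocate a new buffer object and wrap it in a GEM object.  The requested
 * domains are translated to TTM placement flags; on NV50+ the allowed
 * domains are also remembered so later validation cannot move the buffer
 * somewhere userspace never asked for.
 */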
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

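/* Fill in the userspace-visible description of a buffer: current domain,
 * GPU offset (the per-client VMA offset when a VM is in use), size, mmap
 * handle and tiling state.
 */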
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

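/* GEM_NEW ioctl: validate the requested tiling flags, allocate the buffer
 * and return a handle plus its initial placement information.
 */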
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

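/* Work out TTM placement flags for a validation request, preferring to
 * leave the buffer where it already is if that satisfies the requested
 * read/write domains.
 */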
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

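/* Buffers reserved for a pushbuf submission, split by the memory domains
 * they are allowed to live in.  validate_fini_list() fences, unreserves
 * and unreferences every buffer on one of these lists.
 */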
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence* fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

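/* Look up and reserve every buffer referenced by the pushbuf, backing off
 * and retrying from scratch if a reservation would deadlock, and sort the
 * buffers onto the per-domain validation lists.
 */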
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->drm->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(drm, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(drm, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

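/* Make the channel wait for the fence currently attached to the buffer,
 * if any, before new commands may touch it.
 */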
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

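/* Validate (place) every buffer on one list, syncing against its previous
 * user before and after the move.  On pre-NV50 chips, any presumed offset
 * that turned out to be wrong is written back to userspace and counted so
 * that relocations get applied.
 */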
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(drm, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(drm, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

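/* Reserve and validate the full buffer list for a pushbuf submission,
 * returning (via *apply_relocs) the number of buffers whose presumed
 * offsets need relocation.
 */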
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_drm *drm = chan->drm;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

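/* Copy a userspace array into a freshly allocated kernel buffer. */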
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

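/* Patch pushbuf contents with the real buffer offsets for entries whose
 * presumed offsets were wrong, waiting for each buffer to idle before
 * writing through a CPU mapping.
 */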
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(drm, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(drm, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(drm, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(drm, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

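/* GEM_PUSHBUF ioctl: copy in the push/buffer/reloc arrays, validate and
 * relocate the buffers, submit the pushbuf segments to the channel (via
 * the IB ring, a call, or a jump depending on the chipset) and fence the
 * whole submission.
 */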
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(drm, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(drm, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(drm, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(drm, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(drm, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(drm, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence);
	if (ret) {
		NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

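/* CPU_PREP ioctl: wait (optionally non-blocking) for the buffer to become
 * idle before the CPU accesses it.
 */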
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}