/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

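/*
 * vmw_cmd_invalid - Verifier callback for commands that user-space may
 * not submit directly; only CAP_SYS_ADMIN clients are let through.
 */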
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
}

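/*
 * vmw_cmd_ok - Verifier callback for commands that need no checking.
 */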
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}


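/*
 * vmw_resource_to_validate_list - Add a resource to the software context's
 * resource validation list, unless it is already on it. The caller's
 * reference is handed over to the list on first addition and dropped in
 * all other cases, including errors.
 */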
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}
		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	vmw_resource_unreference(p_res);
	return ret;
}

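/*
 * vmw_cmd_cid_check - Verify the context id of a command and add the
 * context to the validation list. The most recently seen id is cached
 * to avoid repeated lookups.
 */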
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;

	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return vmw_resource_to_validate_list(sw_context, &ctx);
}

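/*
 * vmw_cmd_sid_check - Verify a user-space surface id, translate it in
 * place to the device surface id and add the surface to the validation
 * list.
 */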
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely(sw_context->sid_valid &&
		   *sid == sw_context->last_sid)) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);
}


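/*
 * vmw_cmd_set_render_target_check - Verify both the context id and the
 * render target surface id of a SETRENDERTARGET command.
 */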
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

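/*
 * vmw_cmd_surface_copy_check - Verify the source and destination surface
 * ids of a SURFACE_COPY command.
 */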
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

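/*
 * vmw_cmd_stretch_blt_check - Verify the source and destination surface
 * ids of a SURFACE_STRETCHBLT command.
 */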
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

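/*
 * vmw_cmd_blt_surf_screen_check - Verify the source surface id of a
 * BLIT_SURFACE_TO_SCREEN command.
 */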
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

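/*
 * vmw_cmd_present_check - Verify the surface id of a PRESENT command.
 */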
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

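/*
 * vmw_translate_guest_ptr - Look up the DMA buffer a guest pointer refers
 * to, queue a relocation for its GMR id and add the buffer to the
 * validation list. On success, *vmw_bo_p holds a reference that the
 * caller must drop.
 */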
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

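/*
 * vmw_cmd_end_query - Verify the context id of an END_QUERY command and
 * translate the guest pointer that receives the query result.
 */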
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

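/*
 * vmw_cmd_wait_query - Verify the context id of a WAIT_FOR_QUERY command
 * and translate the guest pointer holding the query result.
 */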
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

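/*
 * vmw_cmd_dma - Verify a SURFACE_DMA command: translate the guest pointer,
 * look up the host surface and patch the command stream with the device
 * surface id.
 */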
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("Could not find surface.\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	vmw_dmabuf_unreference(&vmw_bo);

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

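/*
 * vmw_cmd_draw - Verify a DRAW_PRIMITIVES command: check the context id,
 * bounds-check the vertex declaration and primitive range arrays against
 * the command size and verify every surface id they reference.
 */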
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}


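/*
 * vmw_cmd_tex_state - Verify a SETTEXTURESTATE command: check the context
 * id and the surface id of every BIND_TEXTURE entry in the state array.
 */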
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}


typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

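/*
 * Per-command verifier dispatch table, indexed by SVGA3D command id
 * relative to SVGA_3D_CMD_BASE.
 */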
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

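/*
 * vmw_cmd_check - Verify a single command: determine its size,
 * bounds-check it against the remaining buffer space and dispatch to the
 * per-command verifier.
 */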
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

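/*
 * vmw_cmd_check_all - Run the verifier over every command in the bounce
 * buffer, advancing by the size each verifier reports back.
 */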
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t size)
{
	void *buf = sw_context->cmd_bounce;
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

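/* Reset the relocation list after it has been applied or abandoned. */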
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

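/*
 * vmw_apply_relocations - Patch each queued guest pointer with the GMR id
 * or VRAM offset its buffer was assigned during validation.
 */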
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

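/*
 * vmw_clear_validations - Undo the validation bookkeeping: empty the
 * buffer validation list and drop all buffer and resource references.
 */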
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}


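/* Validate the placement of every buffer on the validation list. */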
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

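/*
 * vmw_resize_cmd_bounce - Grow the command bounce buffer to at least
 * @size bytes, increasing by 50% per step and page-aligning the result.
 * The previous contents are not preserved.
 */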
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the FIFO and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;


	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

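/*
 * vmw_execbuf_ioctl - Main entry point for user-space command submission.
 *
 * Copies the command stream into the bounce buffer, verifies and patches
 * it, reserves and validates all referenced buffers, commits the stream
 * to the FIFO and fences the submission.
 */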
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_fence_obj *fence;
	uint32_t handle;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

751 752 753 754 755 756
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	ret = vmw_resize_cmd_bounce(sw_context, arg->command_size);
	if (unlikely(ret != 0))
		goto out_unlock;

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(sw_context->cmd_bounce,
			     user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_unlock;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->num_ref_resources = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_throttle;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
	vmw_fifo_commit(dev_priv, arg->command_size);

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (user_fence_rep) {
		fence_rep.error = ret;
		fence_rep.handle = handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;

		/*
		 * copy_to_user errors will be detected by user space not
		 * seeing fence_rep::error filled in. Typically
		 * user-space would have pre-set that member to -EFAULT.
		 */
		ret = copy_to_user(user_fence_rep, &fence_rep,
				   sizeof(fence_rep));

		/*
		 * User-space lost the fence object. We need to sync
		 * and unreference the handle.
		 */
		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
			BUG_ON(fence == NULL);

			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						  handle, TTM_REF_USAGE);
			DRM_ERROR("Fence copy error. Syncing.\n");
			(void) vmw_fence_obj_wait(fence,
						  fence->signal_mask,
						  false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
out_throttle:
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}