/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

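/**
 * vmw_du_cleanup - Tear down the core objects of a display unit
 * @du: The display unit to clean up.
 *
 * Cleans up the primary and cursor planes, unregisters and cleans up the
 * connector, and cleans up the CRTC and encoder belonging to @du.
 */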
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

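/**
 * vmw_cursor_update_image - Send a new cursor image to the device
 * @dev_priv: Pointer to the device private structure.
 * @image: Cursor image data, 32 bits per pixel.
 * @width: Image width in pixels.
 * @height: Image height in pixels.
 * @hotspotX: Hotspot x coordinate within the image.
 * @hotspotY: Hotspot y coordinate within the image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command, copies
 * the image data in after the command and commits the lot to the device.
 * Returns 0 on success, negative error code on failure.
 */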
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

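/**
 * vmw_cursor_update_dmabuf - Update the cursor image from a DMA buffer
 * @dev_priv: Pointer to the device private structure.
 * @dmabuf: DMA buffer holding the cursor image.
 * @width: Image width in pixels.
 * @height: Image height in pixels.
 * @hotspotX: Hotspot x coordinate within the image.
 * @hotspotY: Hotspot y coordinate within the image.
 *
 * Reserves and kmaps the buffer object and hands the mapped image to
 * vmw_cursor_update_image(). Returns 0 on success, negative error code
 * on failure.
 */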
static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *dmabuf,
				    u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}


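/**
 * vmw_cursor_update_position - Move, show or hide the device cursor
 * @dev_priv: Pointer to the device private structure.
 * @show: Whether the cursor should be visible.
 * @x: New cursor x position.
 * @y: New cursor y position.
 *
 * Updates the SVGA_FIFO_CURSOR_ON/X/Y registers and bumps the cursor
 * update count so the device picks up the new state.
 */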
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


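/**
 * vmw_kms_cursor_snoop - Snoop a surface DMA command aimed at a cursor surface
 * @srf: The surface being written to.
 * @tfile: The struct ttm_object_file identifying the caller.
 * @bo: The guest backing buffer referenced by the DMA command.
 * @header: Header of the SVGA3dCmdSurfaceDMA command.
 *
 * Copies the cursor image carried by the DMA command into the surface's
 * snooper buffer and bumps the snooper age so the image can later be
 * re-emitted by vmw_kms_cursor_post_execbuf(). Only a single, page-aligned
 * copy box at the origin is handled; anything else is rejected with an
 * error message.
 */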
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
	/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
	/* TODO handle more than one copy */
	DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

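/**
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 * @dev_priv: Pointer to the device private structure.
 *
 * Walks all CRTCs and, for every display unit whose cursor surface carries
 * a newer snooper age than the one last sent, re-sends the snooped cursor
 * image to the device.
 */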
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the framebuffer surface
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface associated with @old_state, if any.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);


	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
			vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_dmabuf_reference(vps->dmabuf);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}


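/**
 * vmw_du_cursor_plane_atomic_update - Push the new cursor state to the device
 * @plane: The cursor plane.
 * @old_state: The previous plane state.
 *
 * Sends the new cursor image to the device, either from the snooped
 * surface image or from a DMA buffer, and updates the cursor position,
 * taking both the legacy and the core hotspots into account. If the new
 * state has neither a surface nor a DMA buffer, the cursor is hidden.
 */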
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;


	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->fb) {
		hotspot_x += plane->fb->hot_x;
		hotspot_y += plane->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_dmabuf = vps->dmabuf;

	/* setup new image */
	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (vps->dmabuf) {
		ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
					       plane->state->crtc_w,
					       plane->state->crtc_h,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	struct drm_rect clip = {};
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);

	if (crtc_state && crtc_state->enable) {
		clip.x2 = crtc_state->adjusted_mode.hdisplay;
		clip.y2 = crtc_state->adjusted_mode.vdisplay;
	}

	ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);

		vcs = vmw_connector_state_to_vcs(du->connector.state);

		/* Only one active implicit framebuffer at a time. */
		mutex_lock(&dev_priv->global_kms_state_mutex);
		if (vcs->is_implicit && dev_priv->implicit_fb &&
		    !(dev_priv->num_implicit == 1 && du->active_implicit)
		    && dev_priv->implicit_fb != vfb) {
			DRM_ERROR("Multiple implicit framebuffers "
				  "not supported.\n");
			ret = -EINVAL;
		}
		mutex_unlock(&dev_priv->global_kms_state_mutex);
	}


	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;


	/* Turning off */
	if (!fb)
		return ret;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}


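/**
 * vmw_du_crtc_atomic_check - Validate a new CRTC state
 * @crtc: The CRTC.
 * @new_state: The new CRTC state to validate.
 *
 * Requires an enabled CRTC to have an active primary plane and to be
 * driven only by the display unit's own connector. Since the virtual
 * device has no dot clock, the logical mode clock is also copied into
 * crtc_clock here.
 */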
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = 1 << drm_connector_index(&du->connector);
	bool has_primary = new_state->plane_mask &
			   BIT(drm_plane_index(crtc->primary));

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}


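/**
 * vmw_du_crtc_atomic_flush - Complete any pending vblank event
 * @crtc: The CRTC.
 * @old_crtc_state: The previous CRTC state.
 *
 * If the new state carries a pending event, it is armed on the vblank
 * counter when a reference can be taken, and sent immediately otherwise.
 */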
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}

}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;

	/* Mapping is managed by prepare_fb/cleanup_fb */
	memset(&vps->host_map, 0, sizeof(vps->host_map));
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->dmabuf)
		(void) vmw_dmabuf_reference(vps->dmabuf);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;


	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	plane->state = &vps->base;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);


	/* Should have been freed by cleanup_fb */
	if (vps->host_map.virtual) {
		DRM_ERROR("Host mapping not freed\n");
		ttm_bo_kunmap(&vps->host_map);
	}

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			  struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

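/**
 * vmw_framebuffer_surface_dirty - Flush dirty regions of a surface framebuffer
 * @framebuffer: The framebuffer that has been touched.
 * @file_priv: The calling file.
 * @flags: Dirty flags, e.g. DRM_MODE_FB_DIRTY_ANNOTATE_COPY.
 * @color: Color argument; unused here.
 * @clips: Array of dirty clip rects, or NULL for the whole framebuffer.
 * @num_clips: Number of clip rects in @clips.
 *
 * Routes the dirty regions to the screen-object or screen-target dirty
 * code. Not supported on the legacy display unit, which has no 3D.
 */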
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_dmabuf_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * vmw_framebuffer_pin - Pin the buffer backing a framebuffer
 * @vfb: The framebuffer whose backing buffer should be pinned.
 *
 * For the legacy display unit the buffer is pinned at the start of VRAM;
 * for screen objects and screen targets it is pinned in VRAM/GMR or MOB
 * memory, depending on whether the framebuffer is dmabuf-backed.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

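/**
 * vmw_framebuffer_unpin - Unpin the buffer backing a framebuffer
 * @vfb: The framebuffer whose backing buffer should be unpinned.
 *
 * Counterpart of vmw_framebuffer_pin(). Returns 0 if there is no backing
 * buffer, otherwise the result of vmw_dmabuf_unpin().
 */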
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd2 *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
			0, /* kernel visible only */
			0, /* flags */
			format,
			true, /* can be a scanout buffer */
			1, /* num of mip levels */
			0,
			0,
			content_base_size,
			srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}



static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd2
					  *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be less than texture size
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width  > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 * helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
	    dmabuf && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

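/**
 * vmw_kms_fb_create - Implementation of the fb_create mode-config callback
 * @dev: The DRM device.
 * @file_priv: The calling file.
 * @mode_cmd: User-space framebuffer metadata.
 *
 * Looks up the surface or DMA buffer behind the user-space handle, keeps a
 * reference on its base object for the lifetime of the framebuffer, and
 * wraps it in a new vmw framebuffer using vmw_kms_new_framebuffer().
 */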
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd->pitches[0],
					mode_cmd->height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;


	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			dev_priv->texture_max_width,
			dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref, and so does the new fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}



/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a wrapper around drm_atomic_helper_check() that, for screen
 * targets, also verifies that the framebuffers selected by the new state
 * fit within the device's primary bounding-box memory.
 *
 * RETURNS
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	struct vmw_private *dev_priv = vmw_priv(dev);
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		unsigned long requested_bb_mem = 0;

		if (dev_priv->active_display_unit == vmw_du_screen_target) {
			if (crtc->primary->fb) {
				int cpp = crtc->primary->fb->pitches[0] /
					  crtc->primary->fb->width;

				requested_bb_mem += crtc->mode.hdisplay * cpp *
						    crtc->mode.vdisplay;
			}

			if (requested_bb_mem > dev_priv->prim_bb_mem)
				return -EINVAL;
		}
	}

	return drm_atomic_helper_check(dev, state);
}


/**
 * vmw_kms_atomic_commit - Perform an atomic state commit
 *
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: Whether nonblocking behaviour is requested
 *
 * This is a simple wrapper around drm_atomic_helper_commit() for
 * us to clear the nonblocking value.
 *
 * Nonblocking commits currently cause synchronization issues
 * for vmwgfx.
 *
 * RETURNS
 * Zero for success or negative error code on failure.
 */
int vmw_kms_atomic_commit(struct drm_device *dev,
			  struct drm_atomic_state *state,
			  bool nonblock)
{
	return drm_atomic_helper_commit(dev, state, false);
}


static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = vmw_kms_atomic_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;

}

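/**
 * vmw_kms_init - Set up kernel modesetting
 * @dev_priv: Pointer to the device private structure.
 *
 * Initializes the DRM mode-config limits and properties and brings up a
 * display system, preferring screen targets and falling back to screen
 * objects and finally to the legacy display unit.
 */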
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * The docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

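/**
 * vmw_kms_write_svga - Program a display mode into the SVGA registers
 * @vmw_priv: Pointer to the device private structure.
 * @width: Mode width in pixels.
 * @height: Mode height in pixels.
 * @pitch: Scanline pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Expected color depth.
 *
 * Writes the pitchlock, width, height and bpp registers and verifies that
 * the host reports the expected depth, returning -EINVAL on a mismatch.
 */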
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}


/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -EINVAL;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}


/*
 * Small shared kms functions.
 */

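/**
 * vmw_du_update_layout - Apply a new GUI layout to the connectors
 * @dev_priv: Pointer to the device private structure.
 * @num: Number of rects in @rects.
 * @rects: Preferred position and size for each display unit.
 *
 * Updates each connector's preferred mode size, position and suggested
 * offset properties from the supplied layout, marks units beyond @num as
 * inactive, re-runs connector detection and generates a hotplug event.
 */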
static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
1885 1886 1887 1888 1889 1890 1891 1892
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
1893 1894
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
1895 1896 1897 1898 1899 1900
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
1901 1902 1903 1904
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
1905 1906 1907 1908 1909 1910
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
1911 1912 1913 1914 1915
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);
1916
	drm_sysfs_hotplug_event(dev);
1917 1918 1919 1920

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}
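
/*
 * Note on the shift above: DRM gamma LUT entries are 16 bits wide while
 * the SVGA palette registers take 8 bits per component, so r[i] >> 8
 * keeps the high byte, e.g. 0xffff -> 0xff and 0x8000 -> 0x80.
 */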

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((du->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
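
/*
 * Worked example (for illustration only): for a 1024x768 request the
 * code above yields htotal = 1024 + 150 = 1174 and
 * vtotal = 768 + 150 = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz,
 * which gives 64662000 / (1174 * 918) ~= 60 Hz vrefresh as intended.
 */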


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_width  = min(max_width,  dev_priv->texture_max_width);

		max_height = min(max_height, dev_priv->stdu_max_height);
		max_height = min(max_height, dev_priv->texture_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_bpp,
					mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct vmw_private *dev_priv = vmw_priv(connector->dev);

	if (property == dev_priv->implicit_placement_property)
		du->is_implicit = val;

	return 0;
}
/**
 * vmw_du_connector_atomic_set_property - Atomic version of set property
 *
 * @connector: connector the property is associated with
 * @state: the connector state to update
 * @property: the property being set
 * @val: the new property value
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
vmw_du_connector_atomic_set_property(struct drm_connector *connector,
				     struct drm_connector_state *state,
				     struct drm_property *property,
				     uint64_t val)
{
	struct vmw_private *dev_priv = vmw_priv(connector->dev);
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);


	if (property == dev_priv->implicit_placement_property) {
		vcs->is_implicit = val;

		/*
		 * We should really be doing a drm_atomic_commit() to
		 * commit the new state, but since this doesn't cause
		 * an immediate state change, this is probably ok
		 */
		du->is_implicit = vcs->is_implicit;
	} else {
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_du_connector_atomic_get_property - Atomic version of get property
 *
 * @connector: connector the property is associated with
 * @state: the connector state to read from
 * @property: the property being queried
 * @val: where the current property value is returned
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
vmw_du_connector_atomic_get_property(struct drm_connector *connector,
				     const struct drm_connector_state *state,
				     struct drm_property *property,
				     uint64_t *val)
{
	struct vmw_private *dev_priv = vmw_priv(connector->dev);
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);

	if (property == dev_priv->implicit_placement_property)
		*val = vcs->is_implicit;
	else {
		DRM_ERROR("Invalid Property %s\n", property->name);
		return -EINVAL;
	}

	return 0;
}


int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}
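
/*
 * Worked example of the Screen Target limits above (illustration only):
 * two 1920x1080 outputs side by side give a 3840x1080 bounding box, so
 * bb_mem = 3840 * 1080 * 4 = 16588800 bytes and
 * pixel_mem = 2 * 1920 * 1080 * 4 = 16588800 bytes; both must fit in
 * prim_bb_mem for the topology to be accepted.
 */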

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
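
/*
 * Illustrative sketch (not part of the driver): callers typically embed
 * struct vmw_kms_dirty in a larger command-building context and provide
 * the two callbacks; the names below are made up for the example:
 *
 *	static void example_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		...encode one blit at dirty->cmd using dirty->unit_x1,
 *		dirty->unit_y1, dirty->unit_x2, dirty->unit_y2 and
 *		dirty->fb_x, dirty->fb_y, then bump dirty->num_hits...
 *	}
 *
 *	static void example_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		...commit dirty->num_hits worth of commands to the fifo...
 *	}
 */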

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object.
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1, NULL);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
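
/*
 * Illustrative sketch (not part of the driver) of the intended pairing
 * of the two buffer helpers around command submission:
 *
 *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
 *	if (ret)
 *		return ret;
 *	...build and commit fifo commands referencing buf...
 *	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
 */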


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @ctx: Pointer to the validation context.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
	struct vmw_resource *res = ctx->res;

	vmw_kms_helper_buffer_revert(ctx->buf);
	vmw_dmabuf_unreference(&ctx->buf);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 * @ctx: Returned validation context; to be handed to
 * vmw_kms_helper_resource_revert or vmw_kms_helper_resource_finish.
 *
 * Reserves and validates also the backup buffer if a guest-backed resource.
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible,
				    struct vmw_validation_ctx *ctx)
{
	int ret = 0;

	ctx->buf = NULL;
	ctx->res = res;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;

		ctx->buf = vmw_dmabuf_reference(res->backup);
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @ctx: Pointer to the validation context returned by
 * vmw_kms_helper_resource_prepare.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
				    struct vmw_fence_obj **out_fence)
{
	struct vmw_resource *res = ctx->res;

	if (ctx->buf || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
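
/*
 * Illustrative sketch (not part of the driver) of the intended
 * prepare/finish bracket around a surface dirty operation:
 *
 *	struct vmw_validation_ctx ctx;
 *
 *	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   dest_x, dest_y, num_clips, 1, &dirty);
 *	if (ret)
 *		vmw_kms_helper_resource_revert(&ctx);
 *	else
 *		vmw_kms_helper_resource_finish(&ctx, out_fence);
 */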

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB needs to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}
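
/*
 * Worked example of the box setup above (illustration only): a clip rect
 * of x1 = 16, y1 = 32, x2 = 144, y2 = 96 becomes an update box at
 * (16, 32) with w = 128, h = 64 and d = 1, i.e. the exclusive x2/y2
 * bounds are turned into a width and height.
 */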

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}
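
/*
 * Illustrative sketch (not part of the driver): the fbdev code would
 * typically resolve its initial output like this, here for unit 0:
 *
 *	struct drm_connector *con;
 *	struct drm_crtc *crtc;
 *	struct drm_display_mode *mode;
 *
 *	ret = vmw_kms_fbdev_init_data(dev_priv, 0, max_w, max_h,
 *				      &con, &crtc, &mode);
 */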

/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending on whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool ret;

	mutex_lock(&dev_priv->global_kms_state_mutex);
	ret = !du->is_implicit || dev_priv->num_implicit == 1;
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	return ret;
}

/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	mutex_lock(&dev_priv->global_kms_state_mutex);

	if (!du->is_implicit)
		goto out_unlock;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
out_unlock:
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 * @immutable: Whether the property is immutable.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
					   bool immutable)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  immutable ?
					  DRM_MODE_PROP_IMMUTABLE : 0,
					  "implicit_placement", 0, 1);
}
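
/*
 * Illustrative sketch (not part of the driver): a display unit
 * implementation would typically attach the property to its connector
 * once created, with an initial value matching du->is_implicit:
 *
 *	drm_object_attach_property(&connector->base,
 *				   dev_priv->implicit_placement_property,
 *				   1);
 */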


/**
 * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
 *
 * @set: The configuration to set.
 * @ctx: The modeset acquire context.
 *
 * The vmwgfx Xorg driver doesn't assign the mode::type member, which
 * when drm_mode_set_crtcinfo is called as part of the configuration setting
 * causes it to return incorrect crtc dimensions causing severe problems in
 * the vmwgfx modesetting. So explicitly clear that member before calling
 * into drm_atomic_helper_set_config.
 */
int vmw_kms_set_config(struct drm_mode_set *set,
		       struct drm_modeset_acquire_ctx *ctx)
{
	if (set && set->mode)
		set->mode->type = 0;

	return drm_atomic_helper_set_config(set, ctx);
}
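
/*
 * Illustrative sketch (not part of the driver): the wrapper is meant to
 * be plugged into a display unit's crtc funcs table in place of
 * drm_atomic_helper_set_config; the struct name here is made up:
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.set_config = vmw_kms_set_config,
 *		...
 *	};
 */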


/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}