/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
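
/*
 * Sizing example for vmw_cursor_update_image() (illustrative only, added
 * for clarity): the reserved FIFO space is the fixed-size command followed
 * directly by the pixel payload, which is why the image is copied to
 * &cmd[1]. For the 64x64 ARGB cursor used by this driver:
 *
 *	image_size = 64 * 64 * 4 = 16384 bytes
 *	cmd_size   = sizeof(*cmd) + 16384 bytes
 *
 * so a single reserve/commit pair covers both the header and the image data.
 */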

int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
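
/*
 * Worked example for the kmap_num computation above (illustrative only,
 * assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): a 64x64 ARGB cursor needs
 *
 *	(64 * 64 * 4 + 4096 - 1) >> 12 = 4 pages
 *
 * mapped from the start of the buffer object before the image can be
 * handed to vmw_cursor_update_image().
 */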


void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

/*
 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
 */
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    uint32_t handle, uint32_t width, uint32_t height,
			    int32_t hot_x, int32_t hot_y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);
	hotspot_x = hot_x + du->hotspot_x;
	hotspot_y = hot_y + du->hotspot_y;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64)) {
		ret = -EINVAL;
		goto out;
	}

	if (handle) {
		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			ret = -EINVAL;
			goto out;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		ret = -EINVAL;
		goto out;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_lookup_handle takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_lookup_handle takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
		du->core_hotspot_x = hot_x;
		du->core_hotspot_y = hot_y;
	}

out:
	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return ret;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x +
				   du->core_hotspot_x,
				   du->cursor_y + du->hotspot_y +
				   du->core_hotspot_y);

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is a u32 pointer, so advance 64 pixels per row. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
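
/*
 * Copy-path example for the snooper above (illustrative only): a fully
 * packed 64-pixel-wide DMA (guest pitch == 64 * 4 == 256 bytes) is one
 * 16384-byte memcpy; anything else is copied row by row, box->w * 4 bytes
 * at a time, advancing the source by cmd->dma.guest.pitch each row while
 * the snooper image keeps its fixed 64-pixel stride.
 */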

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd,
					   bool is_dmabuf_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handle;
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * vmw_framebuffer_pin - Pin the buffer backing the framebuffer.
 *
 * The actual placement depends on the active display unit: start of VRAM
 * for the legacy display unit, VRAM/GMR or MOB placement otherwise.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->depth) {
	case 32:
	case 24:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case 16:
	case 15:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitch / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
			0, /* kernel visible only */
			0, /* flags */
			format,
			true, /* can be a scanout buffer */
			1, /* num of mip levels */
			0,
802
			0,
803 804 805 806 807
			content_base_size,
			srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}

902 903 904 905 906 907 908 909 910 911 912
/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 * helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
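
/*
 * Usage sketch (illustrative; mirrors the call in vmw_kms_fb_create()
 * below): exactly one of @dmabuf and @surface should be non-NULL, and the
 * return value must be checked with IS_ERR():
 *
 *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
 *				      !(dev_priv->capabilities & SVGA_CAP_3D),
 *				      &mode_cmd);
 *	if (IS_ERR(vfb))
 *		return PTR_ERR(vfb);
 */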

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
				    &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      &mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
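
/*
 * Worked example for the check above (illustrative only): for a 1920x1200
 * mode at 32 bpp the caller passes pitch = 1920 * 4 = 7680, so
 *
 *	7680 * 1200 = 9216000 bytes (~8.8 MiB)
 *
 * must fit below vram_size, or below prim_bb_mem when Screen Targets are
 * the active display unit.
 */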


/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}


/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
1360 1361
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
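
/*
 * Why the clock formula above yields a 60Hz mode (illustrative derivation):
 * with the clock expressed in kHz, drm_mode_vrefresh() computes roughly
 * clock * 1000 / (htotal * vtotal), and
 *
 *	(htotal * vtotal * 6 / 100) * 1000 / (htotal * vtotal) = 60
 *
 * so any hdisplay/vdisplay pair fed through this helper reports a 60Hz
 * vertical refresh.
 */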


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 2;

	/*
	 * If using screen objects, then assume 32-bpp because that's what the
	 * SVGA device is assuming
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		assumed_bpp = 4;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_bpp,
					mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}


int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem    = bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
1666 1667 1668 1669
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}
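
/*
 * Worked example of the Screen Target limits above (illustrative only):
 * two 1920x1080 outputs side by side give a 3840x1080 bounding box, so
 *
 *	bb_mem    = 3840 * 1080 * 4 = 16588800 bytes
 *	pixel_mem = 2 * 1920 * 1080 * 4 = 16588800 bytes
 *
 * and both values must stay within dev_priv->prim_bb_mem for the layout
 * to be accepted.
 */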

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
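
/*
 * Example: a minimal sketch of a vmw_kms_helper_dirty() caller. The
 * example_* names and EXAMPLE_BLIT_SIZE are hypothetical; real callers
 * elsewhere in the driver embed struct vmw_kms_dirty the same way and
 * supply the two callbacks:
 *
 *	static void example_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		// Encode one blit for the clipped rect given by
 *		// dirty->unit_x1/y1/x2/y2 and dirty->fb_x/y into
 *		// dirty->cmd, then count it for the commit callback.
 *		dirty->num_hits++;
 *	}
 *
 *	static void example_fifo_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		vmw_fifo_commit(dirty->dev_priv,
 *				dirty->num_hits * EXAMPLE_BLIT_SIZE);
 *	}
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = example_clip,
 *		.fifo_commit = example_fifo_commit,
 *		.fifo_reserve_size = num_clips * EXAMPLE_BLIT_SIZE,
 *	};
 *
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */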

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, interruptible, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be NULL if @user_fence_rep is NULL, and non-NULL
 * if @user_fence_rep is non-NULL.
 * @buf: The buffer object.
 * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
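
/*
 * Typical call sequence around command submission (an illustrative
 * sketch; example_build_and_commit_fifo_cmds() is a hypothetical
 * stand-in for whatever fifo commands the caller submits against @buf):
 *
 *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = example_build_and_commit_fifo_cmds(dev_priv, buf);
 *	if (ret) {
 *		vmw_kms_helper_buffer_revert(buf);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
 */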


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
	vmw_kms_helper_buffer_revert(res->backup);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Also reserves and validates the backup buffer if this is a guest-backed
 * resource. Returns 0 on success, negative error code on failure,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
			     struct vmw_fence_obj **out_fence)
{
	if (res->backup || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
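
/*
 * The resource helpers pair up the same way (a sketch; the
 * command-building step is hypothetical):
 *
 *	ret = vmw_kms_helper_resource_prepare(res, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = example_build_and_commit_fifo_cmds(res);
 *	if (ret) {
 *		vmw_kms_helper_resource_revert(res);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_resource_finish(res, NULL);
 *
 * Note that vmw_kms_helper_resource_prepare() returns with the
 * cmdbuf_mutex held on success; both the revert and finish helpers
 * drop it again.
 */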

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated prior to the call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}
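
/*
 * Worked example of the box encoding above: a clip rect with x1 = 16,
 * y1 = 32, x2 = 80, y2 = 96 becomes the box x = 16, y = 32, z = 0,
 * w = 64, h = 64, d = 1. The box origin is the clip's top-left corner,
 * w/h are the clip's extents, and the depth is a single slice since
 * the proxy surface is two-dimensional.
 */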

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}
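
/*
 * Example (sketch): looking up the connector, crtc and preferred mode
 * of the first display unit during fbdev setup. The 1920x1080 bound is
 * an arbitrary example value:
 *
 *	struct drm_connector *con;
 *	struct drm_crtc *crtc;
 *	struct drm_display_mode *mode;
 *
 *	ret = vmw_kms_fbdev_init_data(dev_priv, 0, 1920, 1080,
 *				      &con, &crtc, &mode);
 *	if (ret)
 *		return ret;
 *	// mode now points at the preferred mode, or at the first mode
 *	// in the list if none was flagged preferred.
 */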

/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);

	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
}

/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);

	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
}
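
/*
 * vmw_kms_add_active() and vmw_kms_del_active() are paired around
 * implicit-placement changes, under mode_config.mutex which both
 * helpers assert (a sketch; the real callers are the modeset paths):
 *
 *	mutex_lock(&dev_priv->dev->mode_config.mutex);
 *	vmw_kms_del_active(dev_priv, du);	// drop the old binding
 *	vmw_kms_add_active(dev_priv, du, vfb);	// bind the new implicit fb
 *	mutex_unlock(&dev_priv->dev->mode_config.mutex);
 */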

/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending on whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);

	if (!du->is_implicit)
		return true;

	if (dev_priv->num_implicit != 1)
		return false;

	return true;
}

/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);

	if (!du->is_implicit)
		return;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
}