/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

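/*
 * Commands and responses up to these sizes are stored inline in the
 * vbuffer allocation itself; larger response buffers are supplied by the
 * caller and kfree()d in free_vbuf().
 */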
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
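	/* Pull completed buffers off the ring; re-check after re-enabling
	 * callbacks so none are missed.
	 */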
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
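		/*
		 * Track the highest completed fence id seen in this batch;
		 * all fences up to that id are signalled in one pass below.
		 */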
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

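/*
 * Queue a prepared set of scatterlists on the control ring.  The fence, if
 * any, is emitted while the queue lock is held, so fence ids match the
 * order in which commands reach the ring.
 */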
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	bool notify = false;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return;
	}

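	/* With indirect descriptors the whole sg list occupies one ring slot. */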
	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vq);
	}
	drm_dev_exit(idx);
}

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

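/*
 * Notification batching: while notifications are disabled, kicks are only
 * recorded in pending_notify and issued once when they are re-enabled.
 */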
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

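/*
 * Response callback for RESOURCE_UNREF: the host no longer uses the
 * resource, so the object backing it can finally be cleaned up.
 */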
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

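/*
 * Block-fetch callback for drm_do_get_edid(): copy one EDID block out of
 * the response buffer returned by the host.
 */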
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_mem_entry *ents,
			     unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
	return 0;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}