/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

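/* A buffer counts as complete once its status is at or above
 * VHOST_DMA_DONE_LEN, i.e. either DONE or FAILED. */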
#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

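/* Cache of received skbs pulled from the backend's skb_array in batches
 * of up to VHOST_RX_BATCH; head and tail index into queue[]. */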
#define VHOST_RX_BATCH 64
struct vhost_net_buf {
	struct sk_buff **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct skb_array *rx_array;
	struct vhost_net_buf rxq;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
					      VHOST_RX_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
		skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
				    vhost_net_buf_get_size(rxq));
		rxq->head = rxq->tail = 0;
	}
}

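/* Length (with any vlan tag) of the next pending skb, refilling the batch
 * cache from the skb_array when it is empty; returns 0 if nothing is queued. */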
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the tail of the outstanding used idx range, done_idx tracks its head.
 * Once the lower device has completed DMAs contiguously, we signal the
 * guest's used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

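/* Busy-polling support: busy_clock() scales local_clock() down from
 * nanoseconds by 2^10, giving roughly microsecond resolution. The loop
 * below keeps spinning only while no reschedule is needed, the timeout
 * has not expired, no signal is pending and no vhost work is queued. */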
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(struct vhost_dev *dev,
				unsigned long endtime)
{
	return likely(!need_resched()) &&
	       likely(!time_after(busy_clock(), endtime)) &&
	       likely(!signal_pending(current)) &&
	       !vhost_has_work(dev);
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_virtqueue *vq,
				    struct iovec iov[], unsigned int iov_size,
				    unsigned int *out_num, unsigned int *in_num)
{
	unsigned long uninitialized_var(endtime);
	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == vq->num && vq->busyloop_timeout) {
		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(vq->dev, endtime) &&
		       vhost_vq_avail_empty(vq->dev, vq))
			cpu_relax();
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

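/* Heuristic cap on outstanding zerocopy DMAs: once upend_idx has run far
 * enough ahead of done_idx (modulo UIO_MAXIOV), TX defers new submissions
 * until completions catch up. */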
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
		== nvq->done_idx;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		/* If more outstanding DMAs, queue the work.
		 * Handle upend_idx wrap around
		 */
		if (unlikely(vhost_exceeds_maxpend(net)))
			break;

		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

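		/* Use zerocopy only for payloads worth the setup cost, when a
		 * ubuf slot is free and the recent error rate is low enough. */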
		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
				      nvq->done_idx
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		total_len += len;
		if (total_len < VHOST_NET_WEIGHT &&
		    !vhost_vq_avail_empty(&net->dev, vq) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

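/* Length of the next pending packet: taken from the batch cache when the
 * backend provides an skb_array, otherwise peeked (plus VLAN_HLEN for a
 * tagged frame) from sk_receive_queue under its lock. */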
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_array)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int sk_has_rx_data(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sk->sk_receive_queue);
}

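/* Like peek_head_len(), but when nothing is pending it may first busy-poll
 * both the rx socket and the tx ring for up to vq->busyloop_timeout. */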
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned long uninitialized_var(endtime);
	int len = peek_head_len(rvq, sk);

	if (!len && vq->busyloop_timeout) {
		/* Both tx vq and rx socket were polled here */
		mutex_lock(&vq->mutex);
		vhost_disable_notify(&net->dev, vq);

		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;

		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       !sk_has_rx_data(sk) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax();

		preempt_enable();

		if (vhost_enable_notify(&net->dev, vq))
			vhost_poll_queue(&vq->poll);
		mutex_unlock(&vq->mutex);

		len = peek_head_len(rvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		if (nvq->rx_array)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	vhost_net_enable_vq(net, vq);
out:
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	struct sk_buff **queue;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

	f->private_data = n;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	vhost_net_buf_unproduce(nvq);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

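/* Flush both vqs; for TX this also waits for outstanding zerocopy DMAs,
 * with tx_flush blocking new zerocopy submissions in the meantime (see
 * vhost_net_tx_select_zcopy()). */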
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

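/* Fetch the skb_array backing a tun or tap fd; NULL means the fd is not
 * backed by either. */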
static struct skb_array *get_tap_skb_array(int fd)
{
	struct skb_array *array;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	array = tun_get_skb_array(file);
	if (!IS_ERR(array))
		goto out;
	array = tap_get_skb_array(file);
	if (!IS_ERR(array))
		goto out;
	array = NULL;
out:
	fput(file);
	return array;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

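/* Swap in a new backend socket for one virtqueue under the vq mutex:
 * allocate fresh zerocopy ubuf state for the new socket, restart polling,
 * then wait out and release the old ubufs and socket. */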
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		vhost_net_buf_unproduce(nvq);
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_array = get_tap_skb_array(fd);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

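/* The vnet header is supplied by vhost when VHOST_NET_F_VIRTIO_NET_HDR is
 * negotiated, otherwise by the socket; mergeable rx buffers or VIRTIO 1.0
 * imply the larger mrg_rxbuf header. */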
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");