/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 - Enable; 0 - Disable");
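
/* Usage sketch (illustrative, assuming this file is built as the usual
 * vhost_net.ko module): zerocopy TX can be disabled at load time with
 * "modprobe vhost_net experimental_zcopytx=0". The 0444 permission makes
 * the parameter read-only at runtime, visible under
 * /sys/module/vhost_net/parameters/experimental_zcopytx.
 */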

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
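
/* Worked example of the encoding above: an entry whose len holds
 * VHOST_DMA_DONE_LEN (2) or VHOST_DMA_FAILED_LEN (3) satisfies
 * VHOST_DMA_IS_DONE() and can be recycled, while VHOST_DMA_CLEAR_LEN (0)
 * and VHOST_DMA_IN_PROGRESS (1) do not, so the completion scan in
 * vhost_zerocopy_signal_used() stops at such an entry.
 */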

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when the socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}
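
/* Worked example of the heuristic above (illustrative numbers): the counters
 * are reset every 1024 packets by vhost_net_tx_packet(), so within a window
 * zerocopy remains selected while tx_zcopy_err <= tx_packets / 64, i.e.
 * until more than roughly 1.6% of recent transmits have failed their
 * lower-device DMA; after that we fall back to copying until the window
 * rolls over.
 */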

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static int tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	int ret;

	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return 0;
	ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	if (!ret)
		net->tx_poll_state = VHOST_NET_POLL_STARTED;
	return ret;
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the end of the pending range (where new zerocopy entries are added) and
 * done_idx tracks its head (the first entry not yet reported to the guest).
 * Once the lower device has completed DMA for a contiguous run starting at
 * done_idx, we signal those used entries to the guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
				      struct vhost_virtqueue *vq)
{
	int i;
	int j = 0;

	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		vq->done_idx = i;
	return j;
}
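
/* Small walk-through of the loop above with hypothetical values: if
 * done_idx == 5 and upend_idx == 8, and heads[5].len is DONE while
 * heads[6].len is still IN_PROGRESS, only entry 5 is returned to the guest
 * via vhost_add_used_and_signal(); the loop breaks at entry 6, done_idx
 * becomes 6, and entries 6 and 7 stay pending until their callbacks fire.
 */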

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt = atomic_read(&ubufs->kref.refcount);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1
	 * so here it is 2.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 2 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);
	/* Set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	vhost_ubuf_put(ubufs);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;
	zcopy = vq->ubufs;

	for (;;) {
		/* First, release any buffers whose DMA has completed */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			/* If there are too many outstanding DMAs, defer
			 * until some of them complete; the num_pends
			 * calculation handles upend_idx wraparound.
			 */
			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
				    (vq->upend_idx - vq->done_idx) :
				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND)) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				       vq->upend_idx != vq->done_idx);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			vq->heads[vq->upend_idx].id = head;
			if (!vhost_net_tx_select_zcopy(net) ||
			    len < VHOST_GOODCOPY_LEN) {
				/* Copy path: no need to wait for DMA completion */
				vq->heads[vq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf = &vq->ubuf_info[head];

				vq->heads[vq->upend_idx].len =
					VHOST_DMA_IN_PROGRESS;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = vq->ubufs;
				ubuf->desc = vq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = vq->ubufs;
				kref_get(&ubufs->kref);
			}
			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (ubufs)
					vhost_ubuf_put(ubufs);
				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
					UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			if (err == -EAGAIN || err == -ENOBUFS)
				tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}
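
/* Note on the VLAN_HLEN adjustment above: when the tag is carried out of
 * band in the skb (vlan_tx_tag_present()), the tap/macvtap receive path
 * reinserts it into the packet data, so the length used to size the RX
 * buffers has to account for those extra 4 bytes.
 */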

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
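
/* Example of the final length fix-up above (hypothetical sizes): for
 * datalen == 1500 and two 1024-byte buffers, the loop records lengths 1024
 * and 1024 and exits with datalen == -548, so the last entry is trimmed to
 * 476 and the used lengths reported to the guest sum to exactly 1500.
 */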

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely((vhost_hlen)))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock;
	int ret;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return 0;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		ret = tx_poll_start(n, sock);
	} else
		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);

	return ret;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		n->tx_flush = true;
		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		/* Wait for all lower device DMAs done. */
		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		n->tx_flush = false;
		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
	}
}
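
/* Note on the flush above: tx_flush makes vhost_net_tx_select_zcopy()
 * refuse to start new zerocopy DMAs while vhost_ubuf_put_and_wait() waits
 * for the outstanding ones to complete; kref_init() then re-arms the ubufs
 * reference count so zerocopy can be used again after the flush.
 */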

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		r = vhost_init_used(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = vq->ubufs;
		vq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_ubuf_put_and_wait(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	rcu_assign_pointer(vq->private_data, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_ubuf_put_and_wait(ubufs);
err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
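
/* Example of the header bookkeeping above (sizes as defined for the virtio
 * net headers: 10 and 12 bytes respectively): with VIRTIO_NET_F_MRG_RXBUF
 * negotiated hdr_len is sizeof(struct virtio_net_hdr_mrg_rxbuf), otherwise
 * sizeof(struct virtio_net_hdr). With VHOST_NET_F_VIRTIO_NET_HDR vhost
 * itself supplies/strips that header (vhost_hlen = hdr_len, sock_hlen = 0);
 * otherwise the backend socket already carries it, so vhost_hlen = 0 and
 * sock_hlen = hdr_len.
 */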

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
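
/* Rough userspace setup sketch (illustrative only; declarations, error
 * handling and the memory-table layout are elided):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_SET_MEM_TABLE, mem);		// struct vhost_memory *
 *	ioctl(vhost, VHOST_SET_VRING_NUM, &state);	// per virtqueue
 *	ioctl(vhost, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost, VHOST_SET_VRING_KICK, &kick);	// eventfd wrappers
 *	ioctl(vhost, VHOST_SET_VRING_CALL, &call);
 *	struct vhost_vring_file backend = { .index = idx, .fd = tapfd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);	// handled above
 *
 * where tapfd is a tun or macvtap fd; passing fd == -1 later detaches the
 * backend, as get_socket() above special-cases.
 */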

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");