/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
}

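/* TX zerocopy accounting: counters are reset every 1024 packets, and
 * vhost_net_tx_select_zcopy() compares the recent error count against
 * roughly 1/64th of the recent packet count to decide whether zerocopy
 * transmission is still worth attempting.
 */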
static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

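/* Zerocopy completion callback, invoked by the lower device once the skb
 * that borrowed the guest buffer has been released.  Marks the matching
 * head as done or failed and drops the ubuf reference.
 */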
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

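/* Busy polling helpers: busy_clock() gives a coarse timestamp
 * (local_clock() >> 10, roughly microseconds) and vhost_can_busy_poll()
 * bounds the spin by the timeout, pending signals, need_resched() and
 * queued vhost work.
 */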
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(struct vhost_dev *dev,
				unsigned long endtime)
{
	return likely(!need_resched()) &&
	       likely(!time_after(busy_clock(), endtime)) &&
	       likely(!signal_pending(current)) &&
	       !vhost_has_work(dev);
}

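/* Stop/start polling the backend socket for a given virtqueue.  The poll
 * entry is looked up by the virtqueue's index within the device, and a
 * virtqueue without a backend socket is left alone.
 */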
static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

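/* Fetch a TX descriptor; if the avail ring is empty and a busyloop
 * timeout is configured, spin briefly waiting for the guest to post new
 * buffers before retrying the fetch once more.
 */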
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_virtqueue *vq,
				    struct iovec iov[], unsigned int iov_size,
				    unsigned int *out_num, unsigned int *in_num)
{
	unsigned long uninitialized_var(endtime);
	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == vq->num && vq->busyloop_timeout) {
		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(vq->dev, endtime) &&
		       vhost_vq_avail_empty(vq->dev, vq))
			cpu_relax();
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

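/* Returns true when enough zerocopy TX buffers are still in flight that
 * we should stop queueing new ones and wait for completions instead.
 */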
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
		== nvq->done_idx;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		/* If more outstanding DMAs, queue the work.
		 * Handle upend_idx wrap around
		 */
		if (unlikely(vhost_exceeds_maxpend(net)))
			break;

		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
				      nvq->done_idx
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		total_len += len;
		if (total_len < VHOST_NET_WEIGHT &&
		    !vhost_vq_avail_empty(&net->dev, vq) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

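/* Return the length of the next packet queued on the socket, including
 * the VLAN header when a tag is present, or 0 if nothing is pending.
 * Backends that implement ->peek_len (tun/tap) are asked directly.
 */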
static int peek_head_len(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int sk_has_rx_data(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	/* Report pending data, not an empty queue. */
	return !skb_queue_empty(&sk->sk_receive_queue);
}

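/* Peek the length of the next rx packet; if nothing is queued yet and a
 * busyloop timeout is set, spin on both the rx socket and the tx avail
 * ring before re-checking, avoiding a kick for closely spaced packets.
 */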
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned long uninitialized_var(endtime);
	int len = peek_head_len(sk);

	if (!len && vq->busyloop_timeout) {
		/* Both tx vq and rx socket were polled here */
		mutex_lock(&vq->mutex);
		vhost_disable_notify(&net->dev, vq);

		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;

		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       !sk_has_rx_data(sk) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax();

		preempt_enable();

		if (vhost_enable_notify(&net->dev, vq))
			vhost_poll_queue(&vq->poll);
		mutex_unlock(&vq->mutex);

		len = peek_head_len(sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	vhost_net_enable_vq(net, vq);
out:
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

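/* Open a new /dev/vhost-net instance: allocate the vhost_net structure
 * (falling back to vmalloc when kmalloc fails), wire up the TX/RX
 * virtqueue kick handlers and the backend socket pollers.
 */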
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int i;

	n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!n) {
		n = vmalloc(sizeof *n);
		if (!n)
			return -ENOMEM;
	}
	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

	f->private_data = n;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

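/* Flush both virtqueues.  For TX with zerocopy enabled this also sets
 * tx_flush to stop new zerocopy DMAs, waits for the outstanding ones to
 * complete, and then re-arms the ubuf refcount for the next round.
 */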
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

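/* VHOST_NET_SET_BACKEND: attach (or detach, fd == -1) a raw packet, tun
 * or tap socket to the given virtqueue, allocating zerocopy state for TX
 * when the backend socket advertises SOCK_ZEROCOPY.
 */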
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

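/* Negotiate features: derive the vnet header length implied by the
 * feature bits and record, per virtqueue, whether vhost or the backend
 * socket is responsible for supplying that header.
 */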
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

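/* The character-device read/write/poll entry points below service the
 * device IOTLB message channel used when VIRTIO_F_IOMMU_PLATFORM is
 * negotiated: userspace reads miss requests and writes back updates.
 */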
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");