/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
#include <net/xdp.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",	VIRTNET_SQ_STAT(packets) },
	{ "bytes",	VIRTNET_SQ_STAT(bytes) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",	VIRTNET_RQ_STAT(packets) },
	{ "bytes",	VIRTNET_RQ_STAT(bytes) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

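/* Schedule NAPI for a queue, suppressing further virtqueue callbacks
 * until the poll routine has run.
 */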
static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

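/* Re-enable virtqueue callbacks once polling is done; if more buffers
 * arrived in the meantime, reschedule NAPI instead.
 */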
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

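/* A mergeable receive buffer's context packs two values into one
 * pointer-sized word: the headroom in the bits above
 * MRG_CTX_HEADER_SHIFT and the truesize in the bits below it.
 */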
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

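/* Kick the XDP transmit queue owned by the current CPU. */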
static void virtnet_xdp_flush(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	virtqueue_kick(sq->vq);
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				   struct send_queue *sq,
				   struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	/* The virtqueue wants to use the data area in front of the packet */
	if (unlikely(xdpf->metasize > 0))
		return -EOPNOTSUPP;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len   += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

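/* Reclaim completed XDP frames from this CPU's XDP transmit queue,
 * then queue one frame for transmission.
 */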
static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
				   struct xdp_frame *xdpf)
{
	struct xdp_frame *xdpf_sent;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct xdp_frame *xdpf_sent;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;
	int drops = 0;
	int err;
	int i;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	return n - drops;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

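/* Receive path for small, single-buffer packets. Runs the attached XDP
 * program if any, linearizing into a freshly allocated page when the
 * buffer lacks the required headroom.
 */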
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     bool *xdp_xmit)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;

	len -= vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

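/* Receive path for mergeable buffers: one packet may span several
 * buffers, counted by hdr->num_buffers. An attached XDP program is run
 * on the (possibly linearized) head buffer before the skb chain is
 * built.
 */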
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 bool *xdp_xmit)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because the buffer
		 * was refilled before XDP was set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* recalculate offset to account for any header
			 * adjustments. Note other cases do not build an
			 * skb and avoid using offset
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* recalculate len if xdp.data or xdp.data_end were
			 * adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

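/* Dispatch one received buffer to the mergeable/big/small handler and
 * hand the resulting skb to the stack. Returns the number of bytes
 * received.
 */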
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int ret;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return 0;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);

	if (unlikely(!skb))
		return 0;

	hdr = skb_vnet_hdr(skb);

	ret = skb->len;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return ret;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
	return 0;
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

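/* Allocate a chain of pages (one page shared by the header and the
 * start of the data, plus MAX_SKB_FRAGS more) and post it as a single
 * big receive buffer.
 */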
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

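/* Pick a receive buffer length for mergeable buffers from the EWMA of
 * recent packet sizes, clamped between the device minimum and one page.
 */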
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

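/* Post one mergeable receive buffer carved from the per-queue page
 * frag allocator, with headroom and truesize encoded in the context.
 */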
static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

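/* Receive up to @budget buffers, refilling the ring (or deferring to
 * refill_work) when it runs low. Returns the number of packets
 * received.
 */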
static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0, bytes = 0;
	void *buf;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (received < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
			received++;
		}
	} else {
		while (received < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
			received++;
		}
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.bytes += bytes;
	rq->stats.packets += received;
	u64_stats_update_end(&rq->stats.syncp);

	return received;
}

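/* Reclaim transmitted skbs from the send virtqueue and credit their
 * bytes and packets to the queue statistics.
 */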
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		bytes += skb->len;
		packets++;

		dev_consume_skb_any(skb);
	}

	/* Avoid overhead when no packets have been processed
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)
		return;

	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}

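/* Receive-side NAPI poll: clean the paired transmit queue, receive up
 * to @budget packets, then kick and flush any XDP transmissions the
 * XDP program triggered.
 */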
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received, qp;
	bool xdp_xmit = false;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit) {
		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
		     smp_processor_id();
		sq = &vi->sq[qp];
		virtqueue_kick(sq->vq);
		xdp_do_flush_map();
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

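/* Transmit-side NAPI poll: reclaim completed transmissions under the
 * tx queue lock and wake the queue once space frees up.
 */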
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

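/* Build the virtio-net header for an skb (pushed into the skb head
 * when the device accepts any header/data sg layout) and add the
 * packet to the send virtqueue.
 */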
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !skb->xmit_more;
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, rpackets, rbytes;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes   = sq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes   = rq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
		napi_disable(&vi->rq[i].napi);
		virtnet_napi_tx_disable(&vi->sq[i].napi);
	}

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

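/* VLAN filtering: add or remove a VLAN ID in the device's filter table
 * via VIRTIO_NET_CTRL_VLAN commands.
 */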
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

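/* Queue/CPU affinity management: with exactly one queue pair per online
 * CPU, pin each pair to its own CPU and publish matching XPS maps;
 * otherwise drop any previously set hints.
 */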
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of cpus is equal to the number
	 * of queue pairs, we let the queue pairs be private to one cpu by
	 * setting the affinity hint to eliminate the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

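/* CPU hotplug callbacks: recompute the affinity hints whenever a CPU
 * comes online or goes away.
 */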
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi, cpu);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

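/* ethtool channel configuration (e.g. "ethtool -L eth0 combined 4"):
 * only combined channels can be resized, and not while XDP is attached.
 */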
/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded.
	 * Also, when XDP is loaded all RX queues have XDP programs, so we
	 * only need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	get_online_cpus();
	err = _virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

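/* Per-queue ethtool statistics: string names, counts, and values; the
 * values are sampled under the u64_stats fetch/retry loop so the 64-bit
 * counters read consistently.
 */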
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	char *p = (char *)data;
	unsigned int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
					 i, virtnet_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
					 i, virtnet_sq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

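/* The "diff" check below zeroes every field userspace is allowed to
 * change, then verifies the remainder matches a blank (PORT_OTHER)
 * settings structure.
 */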
/* Check if the user is trying to change anything besides speed/duplex */
static bool
virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	/* cmd is always set so we need to clear it, validate the port type
	 * and also without autonegotiation we can ignore advertising
	 */
	diff1.base.speed = 0;
	diff2.base.port = PORT_OTHER;
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.duplex = 0;
	diff1.base.cmd = 0;
	diff1.base.link_mode_masks_nwords = 0;

	return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
		bitmap_empty(diff1.link_modes.supported,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(diff1.link_modes.advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(diff1.link_modes.lp_advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	/* don't allow custom speed and duplex */
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !virtnet_validate_ethtool_cmd(cmd))
		return -EINVAL;
	vi->speed = speed;
	vi->duplex = cmd->base.duplex;

	return 0;
}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
						  speed));
	if (ethtool_validate_speed(speed))
		vi->speed = speed;
	duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
						  duplex));
	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
};

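/* Suspend path: detach the netdev, stop TX and the refill worker, and
 * park all NAPI instances so nothing touches the device while frozen.
 */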
static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	netif_tx_disable(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	netif_device_attach(vi->dev);
	return err;
}

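/* Guest offload control: VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET replaces the
 * full set of receive offloads at once. The clear/restore helpers below
 * are used when an XDP program is attached and detached.
 */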
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
	struct scatterlist sg;

	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
		return -EINVAL;
	}

	return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = 0;

	if (!vi->guest_offloads)
		return 0;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}

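/* Attach/detach an XDP program: validate device features and MTU,
 * reserve one extra TX queue per CPU for XDP_TX, then swap the per-queue
 * program pointers under RCU, adjusting guest offloads on the first
 * attach and the last detach.
 */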
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
{
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
		return -EINVAL;
	}

	if (dev->mtu > max_sz) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
		return -EINVAL;
	}

	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
		netdev_warn(dev, "request %i queues but max is %i\n",
			    curr_qp + xdp_qp, vi->max_queue_pairs);
		return -ENOMEM;
	}

	if (prog) {
		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	/* Make sure NAPI is not using any XDP TX queues for RX. */
	if (netif_running(dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			napi_disable(&vi->rq[i].napi);

	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err)
		goto err;
	vi->xdp_queue_pairs = xdp_qp;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
		if (i == 0) {
			if (!old_prog)
				virtnet_clear_guest_offloads(vi);
			if (!prog)
				virtnet_restore_guest_offloads(vi);
		}
		if (old_prog)
			bpf_prog_put(old_prog);
		if (netif_running(dev))
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
	}

	return 0;

err:
	for (i = 0; i < vi->max_queue_pairs; i++)
		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
	if (prog)
		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
	return err;
}

static u32 virtnet_xdp_query(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		if (xdp_prog)
			return xdp_prog->aux->id;
	}
	return 0;
}

static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = virtnet_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
	.ndo_bpf		= virtnet_xdp,
	.ndo_xdp_xmit		= virtnet_xdp_xmit,
	.ndo_xdp_flush		= virtnet_xdp_flush,
	.ndo_features_check	= passthru_features_check,
};

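/* Deferred config-change handler: re-read the status word, honour link
 * announce requests, and toggle the carrier when the link state flips.
 */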
static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		return;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		virtnet_update_settings(vi);
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		napi_hash_del(&vi->rq[i].napi);
		netif_napi_del(&vi->rq[i].napi);
		netif_napi_del(&vi->sq[i].napi);
	}

	/* We called napi_hash_del() before netif_napi_del(),
	 * we need to respect an RCU grace period before freeing vi->rq
	 */
	synchronize_net();

	kfree(vi->rq);
	kfree(vi->sq);
	kfree(vi->ctrl);
}

static void _free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	rtnl_lock();
	_free_receive_bufs(vi);
	rtnl_unlock();
}

static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;
	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page)
			put_page(vi->rq[i].alloc_frag.page);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (!is_xdp_raw_buffer_queue(vi, i))
				dev_kfree_skb(buf);
			else
				put_page(virt_to_head_page(buf));
		}
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs) {
				put_page(virt_to_head_page(buf));
			} else if (vi->big_packets) {
				give_pages(&vi->rq[i], buf);
			} else {
				put_page(virt_to_head_page(buf));
			}
		}
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
	const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	bool *ctx;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
		if (ctx)
			ctx[rxq2vq(i)] = true;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names, ctx, NULL);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	kfree(ctx);

	return 0;

err_find:
	kfree(ctx);
err_ctx:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

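/* Allocate per-device queue state: the control buffer and the send and
 * receive queue arrays, with NAPI and scatterlists initialized for each
 * queue pair.
 */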
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
	if (!vi->ctrl)
		goto err_ctrl;
	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);
		netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
				  napi_tx ? napi_weight : 0);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	kfree(vi->ctrl);
err_ctrl:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

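/* Exposed (typically) as
 * /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size:
 * the EWMA-estimated buffer size currently used when refilling this
 * receive queue.
 */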
#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
		char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
				       SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

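/* Runs before feature negotiation is finalized: verify config space
 * access and drop VIRTIO_NET_F_MTU if the advertised MTU is unusable.
 */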
static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev, "device MTU appears to have changed, "
				"it is now %d < %d", mtu, dev->min_mtu);
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;

		/* TODO: size buffers correctly in this case. */
		if (dev->mtu > ETH_DATA_LEN)
			vi->big_packets = true;
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

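/* Teardown shared by remove and freeze: reset the device so no buffers
 * remain in flight, then free buffers and delete the virtqueues.
 */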
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err)
		return err;

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.validate =	virtnet_validate,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");