// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
	u64 tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};
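
/* Illustrative layout note (assuming the usual 12-byte mrg_rxbuf header):
 * 12 bytes of header plus the 4 bytes of padding total 16, so the data
 * sg entry that follows in the same page starts 16-byte aligned.
 */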

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
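
/* Illustrative sketch (not part of the driver): sk_buff and xdp_frame
 * pointers share the same virtqueue token space; since both are at least
 * word-aligned, bit 0 is free to act as a type tag:
 *
 *	void *token = xdp_to_ptr(xdpf);		// tag bit set
 *	...
 *	if (is_xdp_frame(token))
 *		xdpf = ptr_to_xdp(token);	// tag bit cleared
 *	else
 *		skb = (struct sk_buff *)token;	// plain skb pointer
 */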

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
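
/* Example: queue pair 1 uses vq index 2 for rx and vq index 3 for tx,
 * i.e. rxq2vq(1) == 2 and txq2vq(1) == 3; vq2rxq()/vq2txq() invert
 * these mappings.
 */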

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; the most recently
 * used list is placed at the head for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}
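
/* Note on the pattern above: virtqueue_enable_cb_prepare() returns an
 * opaque value that virtqueue_poll() checks afterwards, closing the
 * window where a buffer could arrive between completing napi and
 * re-enabling the callback; if one did, napi is simply rescheduled.
 */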

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
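
/* Worked example (illustrative): a buffer with truesize 1536 and headroom
 * 256 is encoded as (256 << 22) | 1536; the helpers above recover 256 and
 * 1536 respectively. The 22-bit truesize field caps encodable buffer sizes
 * at 4 MB, far above PAGE_SIZE.
 */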

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* If headroom is not 0, there is an offset between the beginning of the
	 * data and the allocated space, otherwise the data and the allocated
	 * space are aligned.
	 *
	 * Buffers with headroom use PAGE_SIZE as alloc size, see
	 * add_recvbuf_mergeable() + get_mergeable_buf_len()
	 */
	truesize = headroom ? PAGE_SIZE : truesize;
	tailroom = truesize - headroom;
	buf = p - headroom;

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom -= hdr_padded_len + len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Build the skb directly around the existing buffer when the packet
	 * is large enough and there is room for the shared info, avoiding
	 * a copy entirely.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = build_skb(buf, truesize);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, p - buf);
		skb_put(skb, len);

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN + metasize;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid) {
		hdr = skb_vnet_hdr(skb);
		memcpy(hdr, hdr_p, hdr_len);
	}
	if (page_to_free)
		put_page(page_to_free);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				   struct send_queue *sq,
				   struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len   += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq, 2. deciding whether the
 * txq needs to be locked/unlocked, and 3. keeping sparse happy. It is difficult
 * for two inline functions to solve all three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
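
/* Illustrative usage (sketch, mirroring virtnet_xdp_xmit() below):
 *
 *	struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *
 *	... add frames to sq->vq, kick if needed ...
 *
 *	virtnet_xdp_put_sq(vi, sq);
 */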

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += n - nxmit;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large
 * buffers with sufficient headroom - so it should affect at most
 * queue-size packets. Afterwards, the conditions to enable XDP should
 * preclude the underlying device from sending packets across multiple
 * buffers (num_buf > 1), and we make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
				 xdp_headroom, len, true);
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			goto err_xdp;
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

skip_xdp:
	skb = build_skb(buf, buflen);
	if (!skb)
		goto err;
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err:
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int metasize = 0;
	unsigned int frame_sz;
	int err;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Buffers with headroom use PAGE_SIZE as alloc size,
		 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
		 */
		frame_sz = headroom ? PAGE_SIZE : truesize;

		/* This happens when the rx buffer size is underestimated
		 * or the headroom is not enough because the buffer
		 * was refilled before XDP was set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			frame_sz = PAGE_SIZE;

			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
				 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			metasize = xdp.data - xdp.data_meta;

			/* recalculate offset to account for any header
			 * adjustments and minus the metasize to copy the
			 * metadata in page_to_skb(). Note other cases do not
			 * build an skb and avoid using offset
			 */
			offset = xdp.data - page_address(xdp_page) -
				 vi->hdr_len - metasize;

			/* recalculate len if xdp.data, xdp.data_end or
			 * xdp.data_meta were adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page, offset,
						       len, PAGE_SIZE, false,
						       metasize,
						       VIRTIO_XDP_HEADROOM);
				return head_skb;
			}
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

skip_xdp:
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context, ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}
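
/* Worked example (illustrative numbers): with room == 0, min_buf_len of
 * 1536, an EWMA average of 2000 bytes and 64-byte cache lines, this
 * returns ALIGN(12 + 2000, 64) == 2048, so refilled buffers track the
 * observed packet size between min_buf_len and PAGE_SIZE - hdr_len.
 */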

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but considering we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now. Calling local_bh_enable afterwards triggers softIRQ
	 * processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}
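
/* Worked example (illustrative): with curr_queue_pairs == 10 and
 * xdp_queue_pairs == 4, queues 0..5 carry ordinary skbs while queues
 * 6..9 hold raw XDP buffers, so this returns true only for q in [6, 9].
 */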

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	if (__netif_tx_trylock(txq)) {
		do {
			virtqueue_disable_cb(sq->vq);
			free_old_xmit_skbs(sq, true);
		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_get_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
		virtnet_xdp_put_sq(vi, sq);
	}

	return received;
}
J
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}
W
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	free_old_xmit_skbs(sq, true);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	opaque = virtqueue_enable_cb_prepare(sq->vq);

	done = napi_complete_done(napi, 0);

	if (!done)
		virtqueue_disable_cb(sq->vq);

	__netif_tx_unlock(txq);

	if (done) {
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		return -EPROTO;

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	do {
		if (use_napi)
			virtqueue_disable_cb(sq->vq);

		free_old_xmit_skbs(sq, false);

	} while (use_napi && kick &&
	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}
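
/* Editor's note (illustrative): the final kick is elided while the stack
 * signals further pending packets via netdev_xmit_more(), so a burst of
 * skbs usually costs a single hypervisor notification; the
 * netif_xmit_stopped() check still forces a kick when the queue was
 * stopped above, otherwise the last packets could linger unsent.
 */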

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;
	int ret;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
	if (ret < 0) {
		dev_warn(&vi->vdev->dev,
			 "Failed to add sgs for command vq: %d\n", ret);
		return false;
	}

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}
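
/* Editor's note, usage sketch (illustrative; mirrors the real callers
 * further down): the command payload travels in one caller-provided
 * scatterlist entry, e.g.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		return -EINVAL;	(device rejected the command)
 */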

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes   = sq->stats.bytes;
			terrors  = sq->stats.tx_timeouts;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes   = rq->stats.bytes;
			rdrops   = rq->stats.drops;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
		tot->rx_dropped += rdrops;
		tot->tx_errors  += terrors;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
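
/* Editor's note (illustrative): the u64_stats_fetch_begin_irq() /
 * u64_stats_fetch_retry_irq() loops above re-read a queue's counters
 * until no writer raced with the reader, yielding a consistent 64-bit
 * snapshot even on 32-bit machines where such loads are not atomic.
 */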

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device goes up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
		napi_disable(&vi->rq[i].napi);
		virtnet_napi_tx_disable(&vi->sq[i].napi);
	}

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		virtnet_clean_affinity(vi);
		return;
	}

	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
			num_cpu % vi->curr_queue_pairs :
			0;
	cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
		cpumask_clear(mask);
	}

	vi->affinity_hint_set = true;
	free_cpumask_var(mask);
}
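
/* Editor's note, worked example (illustrative): with 8 online CPUs and
 * 3 queue pairs, stride = 8 / 3 = 2 and stragglers = 8 % 3 = 2, so the
 * first two queue pairs are given 3 CPUs each and the last one 2,
 * covering every online CPU exactly once.
 */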

static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}
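
/* Editor's note (illustrative): the paired cpuhp instances registered
 * above re-run virtnet_set_affinity() as CPUs come and go, while
 * virtnet_cpu_down_prep() drops the affinity hints before a CPU
 * disappears so a departing CPU is never left as a queue's target.
 */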

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded
	 * also when XDP is loaded all RX queues have XDP programs so we only
	 * need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	cpus_read_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	if (err) {
		cpus_read_unlock();
		goto err;
	}
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);
 err:
	return err;
}

static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int i, j;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
						virtnet_rq_stats_desc[j].desc);
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
						virtnet_sq_stats_desc[j].desc);
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &vi->speed, &vi->duplex);
}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int virtnet_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, napi_weight;

	if (ec->tx_max_coalesced_frames > 1 ||
	    ec->rx_max_coalesced_frames != 1)
		return -EINVAL;

	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
	if (napi_weight ^ vi->sq[0].napi.weight) {
		if (dev->flags & IFF_UP)
			return -EBUSY;
		for (i = 0; i < vi->max_queue_pairs; i++)
			vi->sq[i].napi.weight = napi_weight;
	}

	return 0;
}
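
/* Editor's note (illustrative): no real coalescing knob is exposed
 * here; tx_max_coalesced_frames acts as a boolean that switches tx NAPI
 * on (weight NAPI_POLL_WEIGHT) or off (weight 0), and it may only be
 * flipped while the interface is down.
 */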

static int virtnet_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ethtool_coalesce ec_default = {
		.cmd = ETHTOOL_GCOALESCE,
		.rx_max_coalesced_frames = 1,
	};
	struct virtnet_info *vi = netdev_priv(dev);

	memcpy(ec, &ec_default, sizeof(ec_default));

	if (vi->sq[0].napi.weight)
		ec->tx_max_coalesced_frames = 1;

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);

	if (ethtool_validate_speed(speed))
		vi->speed = speed;

	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);

	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
	.set_coalesce = virtnet_set_coalesce,
	.get_coalesce = virtnet_get_coalesce,
};

static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_tx_lock_bh(vi->dev);
	netif_device_detach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	netif_tx_lock_bh(vi->dev);
	netif_device_attach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	return err;
}

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
	struct scatterlist sg;

	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);

	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
		return -EINVAL;
	}

	return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = 0;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
{
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
		return -EINVAL;
	}

	if (dev->mtu > max_sz) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
		return -EINVAL;
	}

	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
		netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
				 curr_qp + xdp_qp, vi->max_queue_pairs);
		xdp_qp = 0;
	}

	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
	if (!prog && !old_prog)
		return 0;

	if (prog)
		bpf_prog_add(prog, vi->max_queue_pairs - 1);

	/* Make sure NAPI is not using any XDP TX queues for RX. */
	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}

	if (!prog) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0)
				virtnet_restore_guest_offloads(vi);
		}
		synchronize_net();
	}

	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err)
		goto err;
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
	vi->xdp_queue_pairs = xdp_qp;

	if (prog) {
		vi->xdp_enabled = true;
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0 && !old_prog)
				virtnet_clear_guest_offloads(vi);
		}
	} else {
		vi->xdp_enabled = false;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (old_prog)
			bpf_prog_put(old_prog);
		if (netif_running(dev)) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	return 0;

err:
	if (!prog) {
		virtnet_clear_guest_offloads(vi);
		for (i = 0; i < vi->max_queue_pairs; i++)
			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
	}

	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}
	if (prog)
		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
	return err;
}
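
/* Editor's note (illustrative): XDP_TX ideally gets one dedicated send
 * queue per CPU (xdp_qp = nr_cpu_ids) so transmission needs no lock;
 * when the device cannot provide that many queue pairs, xdp_qp falls
 * back to 0 and XDP_TX/XDP_REDIRECT share the regular tx queues under a
 * lock, as the netdev_warn_once() above says.
 */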

static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
				      size_t len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int ret;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	ret = snprintf(buf, len, "sby");
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

2658 2659 2660 2661
static int virtnet_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct virtnet_info *vi = netdev_priv(dev);
2662
	u64 offloads;
2663 2664
	int err;

2665
	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
2666
		if (vi->xdp_enabled)
2667 2668
			return -EBUSY;

2669
		if (features & NETIF_F_GRO_HW)
2670
			offloads = vi->guest_offloads_capable;
2671
		else
2672
			offloads = vi->guest_offloads_capable &
2673
				   ~GUEST_OFFLOAD_GRO_HW_MASK;
2674

2675 2676 2677 2678
		err = virtnet_set_guest_offloads(vi, offloads);
		if (err)
			return err;
		vi->guest_offloads = offloads;
2679 2680 2681 2682 2683
	}

	return 0;
}

2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695
static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct virtnet_info *priv = netdev_priv(dev);
	struct send_queue *sq = &priv->sq[txqueue];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.tx_timeouts++;
	u64_stats_update_end(&sq->stats.syncp);

	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
		   txqueue, sq->name, sq->vq->index, sq->vq->name,
2696
		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
2697 2698
}

2699 2700 2701 2702 2703
static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
2704
	.ndo_set_mac_address = virtnet_set_mac_address,
2705
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
2706
	.ndo_get_stats64     = virtnet_stats,
2707 2708
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2709
	.ndo_bpf		= virtnet_xdp,
J
Jason Wang 已提交
2710
	.ndo_xdp_xmit		= virtnet_xdp_xmit,
2711
	.ndo_features_check	= passthru_features_check,
2712
	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
2713
	.ndo_set_features	= virtnet_set_features,
2714
	.ndo_tx_timeout		= virtnet_tx_timeout,
2715 2716
};

2717
static void virtnet_config_changed_work(struct work_struct *work)
2718
{
2719 2720
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
2721 2722
	u16 v;

2723 2724
	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
M
Michael S. Tsirkin 已提交
2725
		return;
2726 2727

	if (v & VIRTIO_NET_S_ANNOUNCE) {
2728
		netdev_notify_peers(vi->dev);
2729 2730
		virtnet_ack_link_announce(vi);
	}
2731 2732 2733 2734 2735

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
M
Michael S. Tsirkin 已提交
2736
		return;
2737 2738 2739 2740

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
2741
		virtnet_update_settings(vi);
2742
		netif_carrier_on(vi->dev);
J
Jason Wang 已提交
2743
		netif_tx_wake_all_queues(vi->dev);
2744 2745
	} else {
		netif_carrier_off(vi->dev);
J
Jason Wang 已提交
2746
		netif_tx_stop_all_queues(vi->dev);
2747 2748 2749 2750 2751 2752 2753
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		__netif_napi_del(&vi->rq[i].napi);
		__netif_napi_del(&vi->sq[i].napi);
	}

	/* We called __netif_napi_del(),
	 * we need to respect an RCU grace period before freeing vi->rq
	 */
	synchronize_net();

	kfree(vi->rq);
	kfree(vi->sq);
	kfree(vi->ctrl);
}

static void _free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	rtnl_lock();
	_free_receive_bufs(vi);
	rtnl_unlock();
}

static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;
	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page)
			put_page(vi->rq[i].alloc_frag.page);
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (!is_xdp_frame(buf))
				dev_kfree_skb(buf);
			else
				xdp_return_frame(ptr_to_xdp(buf));
		}
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs) {
				put_page(virt_to_head_page(buf));
			} else if (vi->big_packets) {
				give_pages(&vi->rq[i], buf);
			} else {
				put_page(virt_to_head_page(buf));
			}
		}
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
	const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
}
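
/* Editor's note, worked example (illustrative): with big_packets and a
 * 16-entry ring, buf_len = 12 + 14 + 4 + 65535 = 65565 and min_buf_len
 * = DIV_ROUND_UP(65565, 16) = 4098, so the function returns
 * max(4098 - 12, 1518) = 4086; with a 256-entry ring the per-buffer
 * share drops below GOOD_PACKET_LEN and 1518 wins instead.
 */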

J
static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	bool *ctx;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
		if (ctx)
			ctx[rxq2vq(i)] = true;
	}

	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
				  names, ctx, NULL);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	/* run here: ret == 0. */


err_find:
	kfree(ctx);
err_ctx:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}
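
/* Editor's note (illustrative): on success the error labels above are
 * reached by straight fall-through with ret == 0 (see the "run here"
 * comment), freeing only the temporary parameter arrays; the virtqueues
 * themselves remain owned by the device until virtnet_del_vqs().
 */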

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	if (vi->has_cvq) {
		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
		if (!vi->ctrl)
			goto err_ctrl;
	} else {
		vi->ctrl = NULL;
	}
	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);
		netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
				  napi_tx ? napi_weight : 0);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	kfree(vi->ctrl);
err_ctrl:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
		char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
				       SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;

		/* TODO: size buffers correctly in this case. */
		if (dev->mtu > ETH_DATA_LEN)
			vi->big_packets = true;
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_failover;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	virtio_reset_device(vdev);

	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}
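
/* Editor's note (illustrative): the unwind labels in virtnet_probe()
 * mirror setup order in reverse (netdev unregistration, then the
 * failover slave, then the virtqueues), so each goto target tears down
 * only what had already been initialized when the failure occurred.
 */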

static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.validate =	virtnet_validate,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");