// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO))
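
/* Note (an assumption based on how this mask is typically used elsewhere in
 * the driver, not shown in this excerpt): these are the guest offload bits
 * that implement hardware GRO, so toggling NETIF_F_GRO_HW amounts to
 * clearing or restoring exactly these feature bits via the
 * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET control command.
 */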

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
	u64 tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
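
/* Illustrative sketch (not itself part of the driver): consumers walk these
 * tables generically, pairing each ethtool string with a u64 loaded at the
 * recorded offset:
 *
 *	for (i = 0; i < VIRTNET_SQ_STATS_LEN; i++) {
 *		size_t off = virtnet_sq_stats_desc[i].offset;
 *		u64 v = *(u64 *)((u8 *)&sq->stats + off);
 *	}
 *
 * virtnet_receive() below uses the same offset walk to fold a per-poll
 * stats snapshot into the per-queue counters.
 */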

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* This structure can hold an RSS message sized for the maximum indirection
 * table length and key size. Note that the default structure describing the
 * RSS configuration, virtio_net_rss_config, carries the same information but
 * cannot hold the table values themselves.
 * Either way, the structure is passed to the virtio hw through sg_buf split
 * into parts, because the table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};
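
/* Size check (illustrative, assuming struct virtio_net_hdr_v1_hash is the
 * usual 20 bytes): 20 bytes of header plus 12 bytes of padding is 32 bytes
 * total, so the data sg entry that shares this page starts 16-byte aligned.
 */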

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
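
/* Illustration of the tagging trick above: both sk_buff and xdp_frame
 * pointers are at least word aligned, so bit 0 is normally clear.  The TX
 * completion path stores either pointer type as a virtqueue token and uses
 * the flag bit to tell them apart later, e.g.
 *
 *	void *token = xdp_to_ptr(xdpf);		// bit 0 now set
 *	if (is_xdp_frame(token))
 *		frame = ptr_to_xdp(token);	// original pointer back
 */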

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
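
/* Worked example of the mapping (illustrative): with 2 queue pairs the
 * virtqueues are 0:rx0, 1:tx0, 2:rx1, 3:tx1 and 4:cvq, so txq2vq(1) == 3,
 * vq2txq() of vq index 3 == 1, and rxq2vq(1) == 2.
 */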

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}
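
/* Illustrative note: the mergeable rx header is 12 bytes, well within the
 * 48-byte skb->cb scratch area, so it can be staged there on both the rx
 * and tx paths without any extra allocation.
 */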

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}
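
/* Illustrative summary: the prepare/poll pair above closes the classic
 * missed-interrupt race.  Callbacks are re-armed with
 * virtqueue_enable_cb_prepare() before napi_complete_done(), then
 * virtqueue_poll() re-checks for buffers that arrived in between and
 * reschedules NAPI instead of going idle if any did.
 */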

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
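
/* Worked example (illustrative): with MRG_CTX_HEADER_SHIFT == 22 the
 * truesize lives in bits 0..21 and the headroom in the bits above, so
 *
 *	ctx = mergeable_len_to_ctx(1536, 256);	// (256 << 22) | 1536
 *	mergeable_ctx_to_truesize(ctx);		// 1536
 *	mergeable_ctx_to_headroom(ctx);		// 256
 *
 * and the whole context rides along as a fake pointer, with no allocation.
 */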

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* If headroom is not 0, there is an offset between the beginning of the
	 * data and the allocated space, otherwise the data and the allocated
	 * space are aligned.
	 *
	 * Buffers with headroom use PAGE_SIZE as alloc size, see
	 * add_recvbuf_mergeable() + get_mergeable_buf_len()
	 */
	truesize = headroom ? PAGE_SIZE : truesize;
	tailroom = truesize - headroom;
	buf = p - headroom;

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom -= hdr_padded_len + len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Build the skb directly around the existing buffer if the whole
	 * packet fits, so the page is handed over without copying.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = build_skb(buf, truesize);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, p - buf);
		skb_put(skb, len);

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN + metasize;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid) {
		hdr = skb_vnet_hdr(skb);
		memcpy(hdr, hdr_p, hdr_len);
	}
	if (page_to_free)
		put_page(page_to_free);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				   struct send_queue *sq,
				   struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len   += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
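
/* Usage sketch (illustrative): every XDP transmit path brackets its work as
 *
 *	struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *	...queue frames on sq->vq...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * When curr_queue_pairs > nr_cpu_ids each CPU owns a private XDP queue and
 * only __netif_tx_acquire() is needed; otherwise queues are shared and a
 * real __netif_tx_lock() serializes against the regular transmit path.
 */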

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += n - nxmit;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}
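
/* Illustrative call, as used by receive_small()/receive_mergeable() below:
 *
 *	xdp_page = xdp_linearize_page(rq, &num_buf, page, offset,
 *				      VIRTIO_XDP_HEADROOM, &len);
 *
 * On success the scattered buffers have been copied into one fresh page
 * with VIRTIO_XDP_HEADROOM reserved, *num_buf has dropped to 1, and the
 * XDP program sees a single linear buffer.
 */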

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
				 xdp_headroom, len, true);
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			goto err_xdp;
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

skip_xdp:
	skb = build_skb(buf, buflen);
	if (!skb)
		goto err;
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err:
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int metasize = 0;
	unsigned int frame_sz;
	int err;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Buffers with headroom use PAGE_SIZE as alloc size,
		 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
		 */
		frame_sz = headroom ? PAGE_SIZE : truesize;

		/* This happens when the rx buffer size is underestimated
		 * or the headroom is not enough because the buffer was
		 * refilled before XDP was set. This should only happen
		 * for the first several packets, so we don't care much
		 * about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			frame_sz = PAGE_SIZE;

			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
				 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			metasize = xdp.data - xdp.data_meta;

			/* recalculate offset to account for any header
			 * adjustments and subtract the metasize so that
			 * the metadata is copied in page_to_skb(). Note
			 * other cases do not build an skb and avoid using
			 * offset
			 */
			offset = xdp.data - page_address(xdp_page) -
				 vi->hdr_len - metasize;

			/* recalculate len if xdp.data, xdp.data_end or
			 * xdp.data_meta were adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page, offset,
						       len, PAGE_SIZE, false,
						       metasize,
						       VIRTIO_XDP_HEADROOM);
				return head_skb;
			}
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(!err)) {
				xdp_return_frame_rx_napi(xdpf);
			} else if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

skip_xdp:
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{
	enum pkt_hash_types rss_hash_type;

	if (!hdr_hash || !skb)
		return;

	switch ((int)hdr_hash->hash_report) {
	case VIRTIO_NET_HASH_REPORT_TCPv4:
	case VIRTIO_NET_HASH_REPORT_UDPv4:
	case VIRTIO_NET_HASH_REPORT_TCPv6:
	case VIRTIO_NET_HASH_REPORT_UDPv6:
	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L4;
		break;
	case VIRTIO_NET_HASH_REPORT_IPv4:
	case VIRTIO_NET_HASH_REPORT_IPv6:
	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L3;
		break;
	case VIRTIO_NET_HASH_REPORT_NONE:
	default:
		rss_hash_type = PKT_HASH_TYPE_NONE;
	}
	skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);
	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
		virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	const size_t hdr_len = vi->hdr_len;
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}
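
/* Worked example (illustrative, assuming hdr_len == 12 and an EWMA average
 * of roughly 1500 bytes): this returns ALIGN(12 + 1500, L1_CACHE_BYTES),
 * with the average clamped to [min_buf_len, PAGE_SIZE - hdr_len], so refill
 * sizes track observed packet sizes; with XDP headroom in play (room != 0)
 * it falls back to a fixed PAGE_SIZE - room instead.
 */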

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but since we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by the other side before we enabled
	 * NAPI, we won't get another interrupt, so process any outstanding
	 * packets now. Call local_bh_enable after to trigger softIRQ
	 * processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	if (__netif_tx_trylock(txq)) {
		do {
			virtqueue_disable_cb(sq->vq);
			free_old_xmit_skbs(sq, true);
		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_get_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
		virtnet_xdp_put_sq(vi, sq);
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	free_old_xmit_skbs(sq, true);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	opaque = virtqueue_enable_cb_prepare(sq->vq);

	done = napi_complete_done(napi, 0);

	if (!done)
		virtqueue_disable_cb(sq->vq);

	__netif_tx_unlock(txq);

	if (done) {
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		return -EPROTO;

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
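
/* Note on the can_push path above (illustrative): when vi->any_header_sg is
 * set (the device tolerates any header/data layout), the virtio header is
 * pushed into the skb headroom so header and data leave as one contiguous
 * scatterlist entry; otherwise the header is staged in skb->cb (see
 * skb_vnet_hdr()) and occupies its own sg slot, which is why
 * sg_init_table() sizes for nr_frags + 2 in that case.
 */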

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	do {
		if (use_napi)
			virtqueue_disable_cb(sq->vq);

		free_old_xmit_skbs(sq, false);

	} while (use_napi && kick &&
	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}

1830 1831 1832
/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
S
stephen hemminger 已提交
1833
 * never fail unless improperly formatted.
1834 1835
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1836
				 struct scatterlist *out)
1837
{
1838
	struct scatterlist *sgs[4], hdr, stat;
1839
	unsigned out_num = 0, tmp;
1840
	int ret;
1841 1842

	/* Caller should know better */
1843
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1844

1845 1846 1847
	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
1848
	/* Add header */
1849
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1850
	sgs[out_num++] = &hdr;
1851

1852 1853
	if (out)
		sgs[out_num++] = out;
1854

1855
	/* Add return status. */
1856
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1857
	sgs[out_num] = &stat;
1858

1859
	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1860 1861 1862 1863 1864 1865
	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
	if (ret < 0) {
		dev_warn(&vi->vdev->dev,
			 "Failed to add sgs for command vq: %d\n.", ret);
		return false;
	}
1866

1867
	if (unlikely(!virtqueue_kick(vi->cvq)))
1868
		return vi->ctrl->status == VIRTIO_NET_OK;
1869 1870 1871 1872

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
1873 1874
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
1875 1876
		cpu_relax();

1877
	return vi->ctrl->status == VIRTIO_NET_OK;
1878 1879
}

1880 1881 1882 1883
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
1884
	int ret;
1885
	struct sockaddr *addr;
1886
	struct scatterlist sg;
1887

1888 1889 1890
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

1891
	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
1892 1893 1894 1895
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
1896
	if (ret)
1897
		goto out;
1898

1899 1900 1901
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1902
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1903 1904
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
1905 1906
			ret = -EINVAL;
			goto out;
1907
		}
1908 1909
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1910 1911 1912 1913 1914 1915 1916
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
1917 1918 1919
	}

	eth_commit_mac_addr_change(dev, p);
1920
	ret = 0;
1921

1922 1923 1924
out:
	kfree(addr);
	return ret;
1925 1926
}

1927 1928
static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
1929 1930 1931
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
T
Toshiaki Makita 已提交
1932
	int i;
1933

T
Toshiaki Makita 已提交
1934
	for (i = 0; i < vi->max_queue_pairs; i++) {
1935
		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
T
Toshiaki Makita 已提交
1936 1937
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];
1938 1939

		do {
T
Toshiaki Makita 已提交
1940 1941 1942
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes   = sq->stats.bytes;
1943
			terrors  = sq->stats.tx_timeouts;
T
Toshiaki Makita 已提交
1944
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
1945 1946

		do {
T
Toshiaki Makita 已提交
1947
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
1948 1949 1950
			rpackets = rq->stats.packets;
			rbytes   = rq->stats.bytes;
			rdrops   = rq->stats.drops;
T
Toshiaki Makita 已提交
1951
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
1952 1953 1954 1955 1956

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
1957
		tot->rx_dropped += rdrops;
1958
		tot->tx_errors  += terrors;
1959 1960 1961
	}

	tot->tx_dropped = dev->stats.tx_dropped;
1962
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1963 1964 1965 1966
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

1967 1968 1969 1970
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1971
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1972 1973 1974 1975
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

1976
static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
J
Jason Wang 已提交
1977 1978 1979 1980 1981 1982 1983
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

1984 1985
	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
J
Jason Wang 已提交
1986 1987

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1988
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
J
Jason Wang 已提交
1989 1990 1991
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
1992
	} else {
J
Jason Wang 已提交
1993
		vi->curr_queue_pairs = queue_pairs;
1994 1995 1996
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
1997
	}
J
Jason Wang 已提交
1998 1999 2000 2001

	return 0;
}

2002 2003 2004 2005 2006 2007 2008 2009 2010 2011
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

R
Rusty Russell 已提交
2012 2013 2014
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
J
Jason Wang 已提交
2015
	int i;
R
Rusty Russell 已提交
2016

2017 2018
	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
J
Jason Wang 已提交
2019

W
Willem de Bruijn 已提交
2020
	for (i = 0; i < vi->max_queue_pairs; i++) {
2021
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
J
Jason Wang 已提交
2022
		napi_disable(&vi->rq[i].napi);
2023
		virtnet_napi_tx_disable(&vi->sq[i].napi);
W
Willem de Bruijn 已提交
2024
	}
R
Rusty Russell 已提交
2025 2026 2027 2028

	return 0;
}

2029 2030 2031
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
2032 2033
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
J
Jiri Pirko 已提交
2034
	struct netdev_hw_addr *ha;
2035
	int uc_count;
2036
	int mc_count;
2037 2038
	void *buf;
	int i;
2039

S
stephen hemminger 已提交
2040
	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2041 2042 2043
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

2044 2045
	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2046

2047
	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2048 2049

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2050
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
2051
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2052
			 vi->ctrl->promisc ? "en" : "dis");
2053

2054
	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2055 2056

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2057
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2058
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2059
			 vi->ctrl->allmulti ? "en" : "dis");
2060

2061
	uc_count = netdev_uc_count(dev);
2062
	mc_count = netdev_mc_count(dev);
2063
	/* MAC filter - use one buffer for both lists */
2064 2065 2066
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
2067
	if (!buf)
2068 2069
		return;

2070 2071
	sg_init_table(sg, 2);

2072
	/* Store the unicast list and count in the front of the buffer */
M
Michael S. Tsirkin 已提交
2073
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
J
Jiri Pirko 已提交
2074
	i = 0;
2075
	netdev_for_each_uc_addr(ha, dev)
J
Jiri Pirko 已提交
2076
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2077 2078

	sg_set_buf(&sg[0], mac_data,
2079
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2080 2081

	/* multicast list and count fill the end */
2082
	mac_data = (void *)&mac_data->macs[uc_count][0];
2083

M
Michael S. Tsirkin 已提交
2084
	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2085
	i = 0;
2086 2087
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2088 2089

	sg_set_buf(&sg[1], mac_data,
2090
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2091 2092

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2093
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2094
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2095 2096

	kfree(buf);
2097 2098
}

2099 2100
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
2101 2102 2103 2104
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

2105
	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2106
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2107 2108

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2109
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2110
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2111
	return 0;
2112 2113
}

2114 2115
static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
2116 2117 2118 2119
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

2120
	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2121
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2122 2123

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2124
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2125
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2126
	return 0;
2127 2128
}

2129
static void virtnet_clean_affinity(struct virtnet_info *vi)
J
Jason Wang 已提交
2130 2131 2132
{
	int i;

2133 2134
	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
2135 2136
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
2137 2138
		}

2139 2140 2141
		vi->affinity_hint_set = false;
	}
}
2142

2143 2144
static void virtnet_set_affinity(struct virtnet_info *vi)
{
2145 2146 2147 2148 2149 2150 2151 2152
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2153
		virtnet_clean_affinity(vi);
2154
		return;
J
Jason Wang 已提交
2155 2156
	}

2157 2158 2159 2160 2161
	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
			num_cpu % vi->curr_queue_pairs :
			0;
2162
	cpu = cpumask_first(cpu_online_mask);
2163

2164 2165 2166 2167 2168 2169 2170 2171 2172 2173
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
2174
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2175
		cpumask_clear(mask);
J
Jason Wang 已提交
2176 2177
	}

2178
	vi->affinity_hint_set = true;
2179
	free_cpumask_var(mask);
J
Jason Wang 已提交
2180 2181
}

2182
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2183
{
2184 2185 2186 2187 2188
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}
2189

2190 2191 2192 2193 2194 2195 2196
static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}
2197

2198 2199 2200 2201 2202
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

2203
	virtnet_clean_affinity(vi);
2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
J
Jason Wang 已提交
2229 2230
}

R
Rick Jones 已提交
2231
static void virtnet_get_ringparam(struct net_device *dev,
2232 2233 2234
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
R
Rick Jones 已提交
2235 2236 2237
{
	struct virtnet_info *vi = netdev_priv(dev);

J
Jason Wang 已提交
2238 2239
	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
R
Rick Jones 已提交
2240 2241 2242 2243
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
	struct net_device *dev = vi->dev;
	struct scatterlist sgs[4];
	unsigned int sg_buf_size;

	/* prepare sgs */
	sg_init_table(sgs, 4);

	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);

	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);

	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);

	sg_buf_size = vi->rss_key_size;
	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2267 2268
				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295
		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
		return false;
	}
	return true;
}

static void virtnet_init_default_rss(struct virtnet_info *vi)
{
	u32 indir_val = 0;
	int i = 0;

	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
						? vi->rss_indir_table_size - 1 : 0;
	vi->ctrl->rss.unclassified_queue = 0;

	for (; i < vi->rss_indir_table_size; ++i) {
		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
		vi->ctrl->rss.indirection_table[i] = indir_val;
	}

	vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
	vi->ctrl->rss.hash_key_length = vi->rss_key_size;

	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
}

2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));

}

2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322
/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

2323
	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2324 2325
		return -EINVAL;

J
John Fastabend 已提交
2326 2327 2328 2329 2330 2331 2332
	/* For now we don't support modifying channels while XDP is loaded
	 * also when XDP is loaded all RX queues have XDP programs so we only
	 * need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

2333
	cpus_read_lock();
2334
	err = _virtnet_set_queues(vi, queue_pairs);
2335
	if (err) {
2336
		cpus_read_unlock();
2337
		goto err;
2338
	}
2339
	virtnet_set_affinity(vi);
2340
	cpus_read_unlock();
2341

2342 2343 2344
	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);
 err:
2345 2346 2347
	return err;
}

T
Toshiaki Makita 已提交
2348 2349 2350 2351
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int i, j;
2352
	u8 *p = data;
T
Toshiaki Makita 已提交
2353 2354 2355 2356

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
2357 2358 2359
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
						virtnet_rq_stats_desc[j].desc);
T
Toshiaki Makita 已提交
2360 2361 2362
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
2363 2364 2365
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
						virtnet_sq_stats_desc[j].desc);
T
Toshiaki Makita 已提交
2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

2395
		stats_base = (u8 *)&rq->stats;
T
Toshiaki Makita 已提交
2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433
static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

2434 2435
static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
2436 2437 2438
{
	struct virtnet_info *vi = netdev_priv(dev);

2439 2440
	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &vi->speed, &vi->duplex);
2441 2442
}

2443 2444
static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
2445 2446 2447
{
	struct virtnet_info *vi = netdev_priv(dev);

2448 2449 2450
	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;
2451 2452 2453 2454

	return 0;
}

2455
static int virtnet_set_coalesce(struct net_device *dev,
2456 2457 2458
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
2459 2460 2461 2462
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, napi_weight;

2463 2464
	if (ec->tx_max_coalesced_frames > 1 ||
	    ec->rx_max_coalesced_frames != 1)
2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478
		return -EINVAL;

	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
	if (napi_weight ^ vi->sq[0].napi.weight) {
		if (dev->flags & IFF_UP)
			return -EBUSY;
		for (i = 0; i < vi->max_queue_pairs; i++)
			vi->sq[i].napi.weight = napi_weight;
	}

	return 0;
}

static int virtnet_get_coalesce(struct net_device *dev,
2479 2480 2481
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496
{
	struct ethtool_coalesce ec_default = {
		.cmd = ETHTOOL_GCOALESCE,
		.rx_max_coalesced_frames = 1,
	};
	struct virtnet_info *vi = netdev_priv(dev);

	memcpy(ec, &ec_default, sizeof(ec_default));

	if (vi->sq[0].napi.weight)
		ec->tx_max_coalesced_frames = 1;

	return 0;
}

2497 2498 2499 2500 2501 2502 2503 2504
static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

2505 2506 2507 2508 2509 2510 2511 2512
static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

2513 2514
	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);

2515 2516
	if (ethtool_validate_speed(speed))
		vi->speed = speed;
2517 2518 2519

	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);

2520 2521 2522 2523
	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588
static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{
	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
}

static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
{
	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
}

static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	if (indir) {
		for (i = 0; i < vi->rss_indir_table_size; ++i)
			indir[i] = vi->ctrl->rss.indirection_table[i];
	}

	if (key)
		memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (indir) {
		for (i = 0; i < vi->rss_indir_table_size; ++i)
			vi->ctrl->rss.indirection_table[i] = indir[i];
	}
	if (key)
		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);

	virtnet_commit_rss_command(vi);

	return 0;
}

static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = vi->curr_queue_pairs;
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

2589
static const struct ethtool_ops virtnet_ethtool_ops = {
2590
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
2591
	.get_drvinfo = virtnet_get_drvinfo,
2592
	.get_link = ethtool_op_get_link,
R
Rick Jones 已提交
2593
	.get_ringparam = virtnet_get_ringparam,
T
Toshiaki Makita 已提交
2594 2595 2596
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
2597 2598
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
2599
	.get_ts_info = ethtool_op_get_ts_info,
2600 2601
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
2602 2603
	.set_coalesce = virtnet_set_coalesce,
	.get_coalesce = virtnet_get_coalesce,
2604 2605 2606 2607 2608
	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
	.get_rxfh = virtnet_get_rxfh,
	.set_rxfh = virtnet_set_rxfh,
	.get_rxnfc = virtnet_get_rxnfc,
2609 2610
};

2611 2612 2613 2614 2615 2616 2617 2618
static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

2619
	netif_tx_lock_bh(vi->dev);
2620
	netif_device_detach(vi->dev);
2621
	netif_tx_unlock_bh(vi->dev);
2622 2623 2624
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
W
Willem de Bruijn 已提交
2625
		for (i = 0; i < vi->max_queue_pairs; i++) {
2626
			napi_disable(&vi->rq[i].napi);
2627
			virtnet_napi_tx_disable(&vi->sq[i].napi);
W
Willem de Bruijn 已提交
2628
		}
2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649
	}
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

W
Willem de Bruijn 已提交
2650
		for (i = 0; i < vi->max_queue_pairs; i++) {
2651
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
W
Willem de Bruijn 已提交
2652 2653 2654
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
2655 2656
	}

2657
	netif_tx_lock_bh(vi->dev);
2658
	netif_device_attach(vi->dev);
2659
	netif_tx_unlock_bh(vi->dev);
2660 2661 2662
	return err;
}

2663 2664 2665
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
	struct scatterlist sg;
2666
	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2667

2668
	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2669 2670 2671

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
2672
		dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698
		return -EINVAL;
	}

	return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = 0;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;

	return virtnet_set_guest_offloads(vi, offloads);
}

2699 2700
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
J
John Fastabend 已提交
2701 2702 2703 2704
{
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
2705
	u16 xdp_qp = 0, curr_qp;
2706
	int i, err;
J
John Fastabend 已提交
2707

2708 2709 2710 2711
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2712 2713
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2714
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
J
John Fastabend 已提交
2715 2716 2717 2718
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2719
		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
J
John Fastabend 已提交
2720 2721 2722 2723
		return -EINVAL;
	}

	if (dev->mtu > max_sz) {
2724
		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
J
John Fastabend 已提交
2725 2726 2727 2728
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
		return -EINVAL;
	}

2729 2730 2731 2732 2733 2734
	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2735 2736
		netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
				 curr_qp + xdp_qp, vi->max_queue_pairs);
2737
		xdp_qp = 0;
2738 2739
	}

2740 2741 2742 2743
	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
	if (!prog && !old_prog)
		return 0;

2744 2745
	if (prog)
		bpf_prog_add(prog, vi->max_queue_pairs - 1);
2746

2747
	/* Make sure NAPI is not using any XDP TX queues for RX. */
2748 2749
	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
2750
			napi_disable(&vi->rq[i].napi);
2751 2752 2753
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}
J
John Fastabend 已提交
2754

2755 2756 2757 2758 2759 2760 2761 2762
	if (!prog) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0)
				virtnet_restore_guest_offloads(vi);
		}
		synchronize_net();
	}
J
John Fastabend 已提交
2763

2764 2765 2766
	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err)
		goto err;
2767
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2768
	vi->xdp_queue_pairs = xdp_qp;
2769

2770
	if (prog) {
2771
		vi->xdp_enabled = true;
2772 2773 2774
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0 && !old_prog)
2775 2776
				virtnet_clear_guest_offloads(vi);
		}
2777 2778
	} else {
		vi->xdp_enabled = false;
2779 2780 2781
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
J
John Fastabend 已提交
2782 2783
		if (old_prog)
			bpf_prog_put(old_prog);
2784
		if (netif_running(dev)) {
2785
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2786 2787 2788
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
J
John Fastabend 已提交
2789 2790 2791
	}

	return 0;
2792

2793
err:
2794 2795 2796 2797 2798 2799
	if (!prog) {
		virtnet_clear_guest_offloads(vi);
		for (i = 0; i < vi->max_queue_pairs; i++)
			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
	}

2800
	if (netif_running(dev)) {
2801
		for (i = 0; i < vi->max_queue_pairs; i++) {
2802
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2803 2804 2805
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
2806
	}
2807 2808 2809
	if (prog)
		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
	return err;
J
John Fastabend 已提交
2810 2811
}

2812
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
J
John Fastabend 已提交
2813 2814 2815
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
2816
		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
J
John Fastabend 已提交
2817 2818 2819 2820 2821
	default:
		return -EINVAL;
	}
}

2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837
static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
				      size_t len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int ret;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	ret = snprintf(buf, len, "sby");
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

2838 2839 2840 2841
static int virtnet_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct virtnet_info *vi = netdev_priv(dev);
2842
	u64 offloads;
2843 2844
	int err;

2845
	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
2846
		if (vi->xdp_enabled)
2847 2848
			return -EBUSY;

2849
		if (features & NETIF_F_GRO_HW)
2850
			offloads = vi->guest_offloads_capable;
2851
		else
2852
			offloads = vi->guest_offloads_capable &
2853
				   ~GUEST_OFFLOAD_GRO_HW_MASK;
2854

2855 2856 2857 2858
		err = virtnet_set_guest_offloads(vi, offloads);
		if (err)
			return err;
		vi->guest_offloads = offloads;
2859 2860
	}

2861 2862 2863 2864 2865 2866 2867 2868 2869 2870
	if ((dev->features ^ features) & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
		else
			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;

		if (!virtnet_commit_rss_command(vi))
			return -EINVAL;
	}

2871 2872 2873
	return 0;
}

2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885
static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct virtnet_info *priv = netdev_priv(dev);
	struct send_queue *sq = &priv->sq[txqueue];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.tx_timeouts++;
	u64_stats_update_end(&sq->stats.syncp);

	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
		   txqueue, sq->name, sq->vq->index, sq->vq->name,
2886
		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
2887 2888
}

2889 2890 2891 2892 2893
static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
2894
	.ndo_set_mac_address = virtnet_set_mac_address,
2895
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
2896
	.ndo_get_stats64     = virtnet_stats,
2897 2898
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2899
	.ndo_bpf		= virtnet_xdp,
J
Jason Wang 已提交
2900
	.ndo_xdp_xmit		= virtnet_xdp_xmit,
2901
	.ndo_features_check	= passthru_features_check,
2902
	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
2903
	.ndo_set_features	= virtnet_set_features,
2904
	.ndo_tx_timeout		= virtnet_tx_timeout,
2905 2906
};

2907
static void virtnet_config_changed_work(struct work_struct *work)
2908
{
2909 2910
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
2911 2912
	u16 v;

2913 2914
	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
M
Michael S. Tsirkin 已提交
2915
		return;
2916 2917

	if (v & VIRTIO_NET_S_ANNOUNCE) {
2918
		netdev_notify_peers(vi->dev);
2919 2920
		virtnet_ack_link_announce(vi);
	}
2921 2922 2923 2924 2925

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
M
Michael S. Tsirkin 已提交
2926
		return;
2927 2928 2929 2930

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
2931
		virtnet_update_settings(vi);
2932
		netif_carrier_on(vi->dev);
J
Jason Wang 已提交
2933
		netif_tx_wake_all_queues(vi->dev);
2934 2935
	} else {
		netif_carrier_off(vi->dev);
J
Jason Wang 已提交
2936
		netif_tx_stop_all_queues(vi->dev);
2937 2938 2939 2940 2941 2942 2943
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

2944
	schedule_work(&vi->config_work);
2945 2946
}

J
Jason Wang 已提交
2947 2948
static void virtnet_free_queues(struct virtnet_info *vi)
{
2949 2950
	int i;

2951
	for (i = 0; i < vi->max_queue_pairs; i++) {
2952 2953
		__netif_napi_del(&vi->rq[i].napi);
		__netif_napi_del(&vi->sq[i].napi);
2954
	}
2955

2956
	/* We called __netif_napi_del(),
2957 2958 2959 2960
	 * we need to respect an RCU grace period before freeing vi->rq
	 */
	synchronize_net();

J
Jason Wang 已提交
2961 2962
	kfree(vi->rq);
	kfree(vi->sq);
2963
	kfree(vi->ctrl);
J
Jason Wang 已提交
2964 2965
}

2966
static void _free_receive_bufs(struct virtnet_info *vi)
J
Jason Wang 已提交
2967
{
J
John Fastabend 已提交
2968
	struct bpf_prog *old_prog;
J
Jason Wang 已提交
2969 2970 2971 2972 2973
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
J
John Fastabend 已提交
2974 2975 2976 2977 2978

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
J
Jason Wang 已提交
2979
	}
2980 2981 2982 2983 2984 2985
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	rtnl_lock();
	_free_receive_bufs(vi);
J
John Fastabend 已提交
2986
	rtnl_unlock();
J
Jason Wang 已提交
2987 2988
}

2989 2990 2991 2992 2993 2994 2995 2996
static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;
	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page)
			put_page(vi->rq[i].alloc_frag.page);
}

J
Jason Wang 已提交
2997 2998 2999 3000 3001 3002 3003
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
J
John Fastabend 已提交
3004
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
3005
			if (!is_xdp_frame(buf))
J
John Fastabend 已提交
3006 3007
				dev_kfree_skb(buf);
			else
3008
				xdp_return_frame(ptr_to_xdp(buf));
J
John Fastabend 已提交
3009
		}
J
Jason Wang 已提交
3010 3011 3012 3013 3014 3015
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
3016
			if (vi->mergeable_rx_bufs) {
3017
				put_page(virt_to_head_page(buf));
3018
			} else if (vi->big_packets) {
3019
				give_pages(&vi->rq[i], buf);
3020
			} else {
3021
				put_page(virt_to_head_page(buf));
3022
			}
J
Jason Wang 已提交
3023 3024 3025 3026
		}
	}
}

3027 3028 3029 3030
static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

3031
	virtnet_clean_affinity(vi);
J
Jason Wang 已提交
3032

3033
	vdev->config->del_vqs(vdev);
J
Jason Wang 已提交
3034 3035

	virtnet_free_queues(vi);
3036 3037
}

3038 3039 3040 3041 3042 3043
/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
3044
	const unsigned int hdr_len = vi->hdr_len;
3045 3046 3047 3048 3049
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

3050 3051
	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
3052 3053
}

J
Jason Wang 已提交
3054
static int virtnet_find_vqs(struct virtnet_info *vi)
3055
{
J
Jason Wang 已提交
3056 3057 3058 3059 3060
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
3061
	bool *ctx;
J
Jason Wang 已提交
3062 3063 3064 3065 3066 3067 3068 3069 3070

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
K
Kees Cook 已提交
3071
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
J
Jason Wang 已提交
3072 3073
	if (!vqs)
		goto err_vq;
3074
	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
J
Jason Wang 已提交
3075 3076
	if (!callbacks)
		goto err_callback;
3077
	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
J
Jason Wang 已提交
3078 3079
	if (!names)
		goto err_names;
3080
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
K
Kees Cook 已提交
3081
		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
3082 3083 3084 3085 3086
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}
J
Jason Wang 已提交
3087 3088 3089 3090 3091 3092

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}
3093

J
Jason Wang 已提交
3094 3095 3096 3097 3098 3099 3100 3101
	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
3102 3103
		if (ctx)
			ctx[rxq2vq(i)] = true;
J
Jason Wang 已提交
3104
	}
3105

3106 3107
	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
				  names, ctx, NULL);
J
Jason Wang 已提交
3108 3109
	if (ret)
		goto err_find;
3110

J
Jason Wang 已提交
3111 3112
	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
3113
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
3114
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3115
	}
J
Jason Wang 已提交
3116 3117 3118

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
3119
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
J
Jason Wang 已提交
3120 3121 3122
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

3123
	/* run here: ret == 0. */
J
Jason Wang 已提交
3124 3125 3126


err_find:
3127 3128
	kfree(ctx);
err_ctx:
J
Jason Wang 已提交
3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

3142 3143 3144 3145 3146 3147 3148
	if (vi->has_cvq) {
		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
		if (!vi->ctrl)
			goto err_ctrl;
	} else {
		vi->ctrl = NULL;
	}
K
Kees Cook 已提交
3149
	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
J
Jason Wang 已提交
3150 3151
	if (!vi->sq)
		goto err_sq;
K
Kees Cook 已提交
3152
	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
3153
	if (!vi->rq)
J
Jason Wang 已提交
3154 3155 3156 3157 3158 3159 3160
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);
3161 3162
		netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
				  napi_tx ? napi_weight : 0);
J
Jason Wang 已提交
3163 3164

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
J
Johannes Berg 已提交
3165
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
J
Jason Wang 已提交
3166
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
T
Toshiaki Makita 已提交
3167 3168 3169

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
J
Jason Wang 已提交
3170 3171 3172 3173 3174 3175 3176
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
3177 3178
	kfree(vi->ctrl);
err_ctrl:
J
Jason Wang 已提交
3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

3195
	cpus_read_lock();
3196
	virtnet_set_affinity(vi);
3197
	cpus_read_unlock();
3198

J
Jason Wang 已提交
3199 3200 3201 3202 3203 3204
	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
3205 3206
}

3207 3208
#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
3209
		char *buf)
3210 3211 3212
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
3213 3214
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
J
Johannes Berg 已提交
3215
	struct ewma_pkt_len *avg;
3216 3217 3218

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
3219
	return sprintf(buf, "%u\n",
3220 3221
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
				       SKB_DATA_ALIGN(headroom + tailroom)));
3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif

3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264
static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3265 3266
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
3267 3268
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
3269 3270 3271 3272 3273 3274 3275
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

3276 3277 3278
#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

3279
static int virtnet_validate(struct virtio_device *vdev)
R
Rusty Russell 已提交
3280
{
3281 3282 3283 3284 3285 3286
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

3287 3288 3289
	if (!virtnet_validate_features(vdev))
		return -EINVAL;

3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
T
Toshiaki Makita 已提交
3303
	int i, err = -ENOMEM;
3304 3305 3306 3307 3308
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

3309 3310 3311 3312 3313
	/* Find if host supports multiqueue/rss virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
J
Jason Wang 已提交
3314 3315

	/* We need at least 2 queue's */
3316
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
J
Jason Wang 已提交
3317 3318 3319
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;
R
Rusty Russell 已提交
3320 3321

	/* Allocate ourselves a network device with room for our info */
J
Jason Wang 已提交
3322
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
R
Rusty Russell 已提交
3323 3324 3325 3326
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
3327 3328
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
3329
	dev->netdev_ops = &virtnet_netdev;
R
Rusty Russell 已提交
3330
	dev->features = NETIF_F_HIGHDMA;
3331

3332
	dev->ethtool_ops = &virtnet_ethtool_ops;
R
Rusty Russell 已提交
3333 3334 3335
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
3336
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
R
Rusty Russell 已提交
3337
		/* This opens up the world of extra features. */
J
Jason Wang 已提交
3338
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3339
		if (csum)
J
Jason Wang 已提交
3340
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3341 3342

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
3343
			dev->hw_features |= NETIF_F_TSO
R
Rusty Russell 已提交
3344 3345
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
3346
		/* Individual feature bits: what can host handle? */
3347 3348 3349 3350 3351 3352 3353
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;

3354 3355
		dev->features |= NETIF_F_GSO_ROBUST;

3356
		if (gso)
3357
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
3358
		/* (!csum && gso) case will be fixed by register_netdev() */
R
Rusty Russell 已提交
3359
	}
3360 3361
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
3362 3363
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3364
		dev->features |= NETIF_F_GRO_HW;
3365
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
3366
		dev->hw_features |= NETIF_F_GRO_HW;
R
Rusty Russell 已提交
3367

3368 3369
	dev->vlan_features = dev->features;

3370 3371 3372 3373
	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

R
Rusty Russell 已提交
3374
	/* Configuration may specify what MAC to use.  Otherwise random. */
3375 3376 3377
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

3378 3379
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
3380 3381 3382
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
3383
		eth_hw_addr_random(dev);
3384
	}
R
Rusty Russell 已提交
3385 3386 3387 3388 3389

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
3390
	vdev->priv = vi;
3391

3392
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
R
Rusty Russell 已提交
3393

3394
	/* If we can receive ANY GSO packets, we must allocate large ones. */
3395 3396
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3397 3398
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
3399 3400
		vi->big_packets = true;

3401 3402 3403
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

3404 3405 3406 3407
	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
3408
		vi->has_rss = true;
3409 3410

	if (vi->has_rss || vi->has_rss_hash_report) {
3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
				rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}
3426 3427 3428 3429 3430

	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3431 3432 3433 3434
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

3435 3436
	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3437 3438
		vi->any_header_sg = true;

J
Jason Wang 已提交
3439 3440 3441
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

3442 3443 3444 3445
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
3446
		if (mtu < dev->min_mtu) {
3447 3448 3449
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
3450 3451 3452
			dev_err(&vdev->dev,
				"device MTU appears to have changed it is now %d < %d",
				mtu, dev->min_mtu);
3453
			err = -EINVAL;
T
Toshiaki Makita 已提交
3454
			goto free;
3455
		}
3456

3457 3458 3459
		dev->mtu = mtu;
		dev->max_mtu = mtu;

		/* TODO: size buffers correctly in this case. */
		if (dev->mtu > ETH_DATA_LEN)
			vi->big_packets = true;
	}

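	/* If the device accepts any descriptor layout, the virtio-net
	 * header can be prepended in the skb headroom rather than sent
	 * as a separate buffer, so reserve room for it.
	 */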
	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

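	/* The mergeable_rx_buffer_size attribute is only meaningful when
	 * mergeable RX buffers were negotiated.
	 */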
#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_failover;
	}

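	/* DRIVER_OK: from this point the device may raise interrupts and
	 * the config-change handler may run, so everything above must be
	 * fully set up first.
	 */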
	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

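/* Error unwind: reset the device first so it stops all virtqueue
 * processing before the netdev is unregistered and freed.
 */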
free_unregister_netdev:
	virtio_reset_device(vdev);

	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
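	/* Stop the device from using any buffers before they are freed. */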
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

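/* Power-management hooks: quiesce the device and tear down its
 * virtqueues on freeze, then rebuild them on restore.
 */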
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

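/* Legacy (transitional) devices may additionally negotiate the old
 * VIRTIO_NET_F_GSO bit and VIRTIO_F_ANY_LAYOUT, which virtio 1.0
 * devices no longer advertise separately.
 */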
static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.validate =	virtnet_validate,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

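	/* Register CPU hotplug states: the dynamic online state and the
	 * dead state let the driver keep virtqueue CPU affinity in sync
	 * as CPUs come and go.
	 */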
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");