/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
                                sizeof(struct virtio_net_hdr_mrg_rxbuf), \
                                L1_CACHE_BYTES))
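/* Worked example (illustrative only, since L1_CACHE_BYTES is architecture
 * dependent): with the standard 1500-byte ETH_DATA_LEN, GOOD_PACKET_LEN is
 * 14 (Ethernet header) + 4 (VLAN tag) + 1500 = 1518 bytes; adding the
 * 12-byte virtio_net_hdr_mrg_rxbuf gives 1530, which ALIGN() rounds up to
 * 1536 on a machine with 64-byte cache lines.
 */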
#define GOOD_COPY_LEN	128

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};
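/* The counters above are updated locklessly from the data path; the two
 * u64_stats_sync seqcounts let a reader (virtnet_stats() below) fetch a
 * consistent 64-bit snapshot, which matters on 32-bit SMP machines where
 * a u64 load is not atomic.
 */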

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
	 * low on memory.
	 */
	struct page_frag alloc_frag;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16-byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};
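/* Illustration: virtio_net_hdr is 10 bytes (flags, gso_type, hdr_len,
 * gso_size, csum_start, csum_offset), so the 6 bytes of padding round
 * the structure up to 16 bytes, aligning the data sg that follows it.
 */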

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
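/* Example: with two queue pairs the virtqueue layout is
 * vq0:rx0 vq1:tx0 vq2:rx1 vq3:tx1 vq4:cvq, so vq2txq() maps vq3 to
 * tx queue 1 and rxq2vq() maps rx queue 1 to vq2.
 */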

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		hdr_padded_len = sizeof hdr->mhdr;
	} else {
		hdr_len = sizeof hdr->hdr;
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static struct sk_buff *receive_small(void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;

	len -= sizeof(struct virtio_net_hdr);
	skb_trim(skb, len);

	return skb;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

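/* Mergeable path: the device may have spread one packet over several
 * MERGE_BUFFER_LEN buffers; num_buffers in the header says how many.
 * The first buffer becomes the head skb via page_to_skb(), and each
 * following buffer is attached as a page fragment, spilling over into
 * frag_list skbs once MAX_SKB_FRAGS fragments are in use.
 */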
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct receive_queue *rq,
					 void *buf,
					 unsigned int len)
{
	struct skb_vnet_hdr *hdr = buf;
	int num_buf = hdr->mhdr.num_buffers;
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
					       MERGE_BUFFER_LEN);
	struct sk_buff *curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;

	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf, hdr->mhdr.num_buffers);
			dev->stats.rx_length_errors++;
			goto err_buf;
		}
		if (unlikely(len > MERGE_BUFFER_LEN)) {
			pr_debug("%s: rx error: merge buffer too long\n",
				 dev->name);
			len = MERGE_BUFFER_LEN;
		}

		page = virt_to_head_page(buf);
		--rq->num;

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += MERGE_BUFFER_LEN;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, MERGE_BUFFER_LEN);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, MERGE_BUFFER_LEN);
		}
	}

	return head_skb;

err_skb:
	put_page(page);
	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(buf);
		put_page(page);
		--rq->num;
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
	return NULL;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs)
			put_page(virt_to_head_page(buf));
		else if (vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, rq, buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, rq, buf, len);
	else
		skb = receive_small(buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
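/* A small-packet receive buffer is a plain skb posted with two sg
 * entries: rq->sg[0] points at the virtio header kept in skb->cb and
 * rq->sg[1] covers the GOOD_PACKET_LEN linear data area -- hence the
 * "2" passed to virtqueue_add_inbuf() above.
 */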

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	char *buf = NULL;
	int err;

	if (gfp & __GFP_WAIT) {
		if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
					 gfp)) {
			buf = (char *)page_address(vi->alloc_frag.page) +
			      vi->alloc_frag.offset;
			get_page(vi->alloc_frag.page);
			vi->alloc_frag.offset += MERGE_BUFFER_LEN;
		}
	} else {
		buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
	}
	if (!buf)
		return -ENOMEM;

	sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	if (unlikely(!virtqueue_kick(rq->vq)))
		return false;
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now.  virtnet_poll wants to re-enable the queue, so we
	 * disable here.  We synchronize against interrupts via
	 * NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int r, len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}
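/* The tail of virtnet_poll() closes the race between napi_complete()
 * and re-enabling callbacks: virtqueue_enable_cb_prepare() publishes
 * the last-seen index, and if virtqueue_poll() then reports more
 * buffers we reschedule ourselves rather than wait for an interrupt
 * that may never arrive.
 */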

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
	if (vi->mergeable_rx_bufs)
		hdr_len = sizeof hdr->mhdr;
	else
		hdr_len = sizeof hdr->hdr;

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	if (vi->mergeable_rx_bufs)
		hdr->mhdr.num_buffers = 0;

	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}
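/* 2 + MAX_SKB_FRAGS above is the worst-case descriptor count for one
 * skb: one sg entry for the virtio header, one for the linear part and
 * up to MAX_SKB_FRAGS page fragments -- matching the sg[MAX_SKB_FRAGS + 2]
 * array in struct send_queue.
 */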

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out,
				 struct scatterlist *in)
{
	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned out_num = 0, in_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	ctrl.class = class;
	ctrl.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;
	if (in)
		sgs[out_num + in_num++] = in;

	/* Add return status. */
	sg_init_one(&stat, &status, sizeof(status));
	sgs[out_num + in_num++] = &stat;

	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
	       < 0);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}
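/* Buffer layout on the control virtqueue, as assembled above:
 *   sgs[0]:  command header (class, cmd)        -- driver to device
 *   sgs[1]:  command-specific out data, if any  -- driver to device
 *   next:    command-specific in data, if any   -- device to driver
 *   last:    one-byte ack the device sets to VIRTIO_NET_OK or
 *            VIRTIO_NET_ERR
 */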

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, NULL)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device is brought up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}
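/* The MAC filter buffer built above is a single allocation holding two
 * virtio_net_ctrl_mac tables back to back:
 *   [ uc entry count | uc_count MACs ][ mc entry count | mc_count MACs ]
 * exposed to the device as the two sg entries sg[0] and sg[1].
 */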

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of cpus equals the number of
	 * queue pairs, we let each queue pair be private to one cpu by
	 * setting the affinity hint to eliminate contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
			        unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch(action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++)
		netif_napi_del(&vi->rq[i].napi);

	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs)
				put_page(virt_to_head_page(buf));
			else if (vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->curr_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");