/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
                                sizeof(struct virtio_net_hdr_mrg_rxbuf), \
                                L1_CACHE_BYTES))
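/* Worked example, assuming a typical 64-byte L1 cache line: GOOD_PACKET_LEN
 * is 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 (ETH_DATA_LEN) = 1518 bytes;
 * adding the 12-byte virtio_net_hdr_mrg_rxbuf gives 1530, which ALIGN()
 * rounds up to a 1536-byte merge buffer.
 */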
#define GOOD_COPY_LEN	128

#define VIRTNET_DRIVER_VERSION "1.0.0"

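/* Per-cpu transmit/receive counters; the u64_stats_sync members guard
 * against torn reads of the 64-bit values on 32-bit machines. */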
struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
	 * low on memory.
	 */
	struct page_frag alloc_frag;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page as this
	 * header sg.  This padding makes the next sg 16 byte aligned
	 * after virtio_net_hdr.
	 */
	char padding[6];
};
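/* sizeof(struct virtio_net_hdr) is 10 bytes, so the 6 bytes of padding above
 * make the data sg buffer that follows start 16 byte aligned. */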

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
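
/* For example, with two queue pairs plus a control virtqueue the device's
 * virtqueues are laid out as 0:rx0 1:tx0 2:rx1 3:tx1 4:cvq, so vq2rxq() of
 * virtqueue index 2 yields rx queue 1 and txq2vq(1) yields virtqueue 3.
 */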

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		hdr_padded_len = sizeof hdr->mhdr;
	} else {
		hdr_len = sizeof hdr->hdr;
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
	struct sk_buff *curr_skb = head_skb;
	char *buf;
	struct page *page;
	int num_buf, len, offset;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 head_skb->dev->name, hdr->mhdr.num_buffers);
			head_skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		if (unlikely(len > MERGE_BUFFER_LEN)) {
			pr_debug("%s: rx error: merge buffer too long\n",
				 head_skb->dev->name);
			len = MERGE_BUFFER_LEN;
		}
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
			if (unlikely(!nskb)) {
				head_skb->dev->stats.rx_dropped++;
				return -ENOMEM;
			}
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += MERGE_BUFFER_LEN;
		}
		page = virt_to_head_page(buf);
		offset = buf - (char *)page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, MERGE_BUFFER_LEN);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, MERGE_BUFFER_LEN);
		}
		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->big_packets)
			give_pages(rq, buf);
		else if (vi->mergeable_rx_bufs)
			put_page(virt_to_head_page(buf));
		else
			dev_kfree_skb(buf);
		return;
	}

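	/* Three buffer layouts are possible, depending on the negotiated
	 * features: a plain skb (small packets), a MERGE_BUFFER_LEN page
	 * fragment (mergeable rx buffers), or a chain of whole pages (big
	 * packets). */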
	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else if (vi->mergeable_rx_bufs) {
		struct page *page = virt_to_head_page(buf);
		skb = page_to_skb(rq, page,
				  (char *)buf - (char *)page_address(page),
				  len, MERGE_BUFFER_LEN);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			put_page(page);
			return;
		}
		if (receive_mergeable(rq, skb)) {
			dev_kfree_skb(skb);
			return;
		}
	} else {
		page = buf;
		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the virtio_net_hdr only, due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	char *buf = NULL;
	int err;

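	/* Mergeable buffers are carved out of a shared page frag; each buffer
	 * takes its own page reference via get_page(), so the page is only
	 * freed once every buffer carved from it has been returned. */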
	if (gfp & __GFP_WAIT) {
		if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
					 gfp)) {
			buf = (char *)page_address(vi->alloc_frag.page) +
			      vi->alloc_frag.offset;
			get_page(vi->alloc_frag.page);
			vi->alloc_frag.offset += MERGE_BUFFER_LEN;
		}
	} else {
		buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
	}
	if (!buf)
		return -ENOMEM;

	sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	if (unlikely(!virtqueue_kick(rq->vq)))
		return false;
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int r, len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
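		/* Re-arm the callback, then re-check for buffers that raced
		 * in; virtqueue_poll() reports whether any did, in which case
		 * we reschedule ourselves instead of completing. */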
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
	if (vi->mergeable_rx_bufs)
		hdr_len = sizeof hdr->mhdr;
	else
		hdr_len = sizeof hdr->hdr;

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	if (vi->mergeable_rx_bufs)
		hdr->mhdr.num_buffers = 0;

	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
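	/* 2+MAX_SKB_FRAGS is the worst case for one skb: a descriptor for the
	 * virtio header, one for the linear part, and one per page fragment,
	 * matching the size of sq->sg. */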
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
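/*
 * The request is assembled from up to four sg entries: the command header,
 * an optional "out" buffer, an optional "in" buffer, and a trailing status
 * byte that the device writes back.
 */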
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out,
				 struct scatterlist *in)
{
	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned out_num = 0, in_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	ctrl.class = class;
	ctrl.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;
	if (in)
		sgs[out_num + in_num++] = in;

	/* Add return status. */
	sg_init_one(&stat, &status, sizeof(status));
	sgs[out_num + in_num++] = &stat;

	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
	       < 0);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, NULL)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of cpus is equal to the number
	 * of queue pairs, we let the queue pairs be private to one cpu by
	 * setting the affinity hint to eliminate the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
			        unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch(action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535
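/* 68 is the minimum MTU an IPv4 host must handle; 65535 is the maximum IP
 * datagram size. */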

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}

done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else if (vi->mergeable_rx_bufs)
				put_page(virt_to_head_page(buf));
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->curr_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");