/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
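/* Both parameters use mode 0444, so they cannot be changed at runtime;
 * set them at module load time, e.g. "modprobe virtio_net csum=0 gso=0". */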

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.
	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

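/* Attach at most one page worth of data from @page at @offset to @skb as a
 * page fragment, consuming the amount used from *len. */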
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

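/* For mergeable receive buffers: pull the remaining num_buffers - 1 pages
 * off the receive virtqueue and attach them to @skb as fragments. */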
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

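/* Handle one completed receive buffer: build the skb, update statistics,
 * restore checksum/GSO state from the virtio header and pass it up the stack. */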
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

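/* Post a single skb-sized receive buffer (virtio header + MAX_PACKET_LEN data). */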
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

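/* Post a chain of pages as one "big packet" receive buffer; the virtio
 * header gets its own sg entry (see struct padded_vnet_hdr). */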
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

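/* Post one page; the host chains as many of these as a packet needs. */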
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

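/* Receive virtqueue callback: hand further processing off to NAPI. */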
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		local_bh_disable();
		__napi_schedule(&vi->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

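/* Reclaim skbs the host has finished sending, update tx stats and return
 * the number of sg entries freed. */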
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

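/* Fill in the virtio header from the skb's checksum/GSO state and add the
 * packet to the send virtqueue. */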
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb, GFP_ATOMIC);
}

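/* Transmit path: free completed skbs, queue this one, kick the host and
 * stop the queue when descriptors run low. */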
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (likely(capacity == -ENOMEM)) {
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
		} else {
			dev->stats.tx_fifo_errors++;
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
		                  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure we have some buffers: if oom use wq. */
	if (!try_fill_recv(vi, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
	napi_disable(&vi->napi);

	return 0;
}

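/* Push promiscuous/allmulti flags and the unicast + multicast MAC filter
 * table to the host via the control virtqueue. */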
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;

}


static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));

}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

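/* Find the receive, send and (optional) control virtqueues. */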
static int init_vqs(struct virtnet_info *vi)
{
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
	const char *names[] = { "input", "output", "control" };
	int nvqs, err;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
	if (err)
		return err;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
	}
	return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	err = init_vqs(vi);
	if (err)
		goto free_stats;

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_vqs:
	vdev->config->del_vqs(vdev);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

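/* Detach and free any buffers still queued in the send and receive virtqueues. */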
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

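/* Reset the device, then tear down its virtqueues and cached pages. */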
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vi->vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		napi_disable(&vi->napi);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		virtnet_napi_enable(vi);

	netif_device_attach(vi->dev);

	if (!try_fill_recv(vi, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");