interface.c 19.9 KB
Newer Older
I
Ian Campbell 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

33
#include <linux/kthread.h>
I
Ian Campbell 已提交
34 35 36
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
37
#include <linux/vmalloc.h>
I
Ian Campbell 已提交
38 39 40

#include <xen/events.h>
#include <asm/xen/hypercall.h>
41
#include <xen/balloon.h>
I
Ian Campbell 已提交
42 43

#define XENVIF_QUEUE_LENGTH 32
44
#define XENVIF_NAPI_WEIGHT  64
I
Ian Campbell 已提交
45

46 47 48 49 50 51 52 53 54 55
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	if (!queue->vif->can_queue)
		return;

	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

I
Ian Campbell 已提交
56 57 58 59 60
/* A vif may be scheduled for work only while its net_device is both
 * administratively up (netif_running) and has carrier.
 */
int xenvif_schedulable(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	if (!netif_running(dev))
		return 0;

	return netif_carrier_ok(dev) ? 1 : 0;
}

61
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
I
Ian Campbell 已提交
62
{
63
	struct xenvif_queue *queue = dev_id;
I
Ian Campbell 已提交
64

65 66
	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);
I
Ian Campbell 已提交
67

68 69 70
	return IRQ_HANDLED;
}

71
int xenvif_poll(struct napi_struct *napi, int budget)
72
{
73 74
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
75 76
	int work_done;

77 78 79 80
	/* This vif is rogue, we pretend we've there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
81
	if (unlikely(queue->vif->disabled)) {
82 83 84 85
		napi_complete(napi);
		return 0;
	}

86
	work_done = xenvif_tx_action(queue, budget);
87 88

	if (work_done < budget) {
89
		napi_complete(napi);
90
		xenvif_napi_schedule_or_enable_events(queue);
91 92 93 94 95
	}

	return work_done;
}

96 97
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
98
	struct xenvif_queue *queue = dev_id;
99

100
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
101 102 103 104

	return IRQ_HANDLED;
}

105 106 107 108 109 110 111 112
/* Combined interrupt handler used when the frontend shares one event
 * channel for both TX and RX (feature-split-event-channels == 0); runs
 * the TX handler then the RX handler.
 */
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128
/* Return non-zero when the netdev TX queue backing @queue is stopped. */
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
	return netif_tx_queue_stopped(txq);
}

/* Restart the netdev TX queue backing @queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
	netif_tx_wake_queue(txq);
}

/* Callback to wake the queue and drain it on timeout */
static void xenvif_wake_queue_callback(unsigned long data)
129
{
130 131 132 133 134 135 136 137 138
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	if (xenvif_queue_stopped(queue)) {
		netdev_err(queue->vif->dev, "draining TX queue\n");
		queue->rx_queue_purge = true;
		xenvif_kick_thread(queue);
		xenvif_wake_queue(queue);
	}
}
139

140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
/* ndo_select_queue: map an skb to one of the vif's TX queues using the
 * skb hash, with a fast path for the single-queue case.
 * (Fix: strip non-code blame-viewer artifact lines embedded in the body.)
 */
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_index;

	/* First, check if there is only one queue to optimise the
	 * single-queue or old frontend scenario.
	 */
	if (num_queues == 1) {
		queue_index = 0;
	} else {
		/* Use skb_get_hash to obtain an L4 hash if available */
		hash = skb_get_hash(skb);
		queue_index = hash % num_queues;
	}

	return queue_index;
}

I
Ian Campbell 已提交
161 162 163
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
164 165 166
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 index;
167
	int min_slots_needed;
I
Ian Campbell 已提交
168 169 170

	BUG_ON(skb->dev != dev);

171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
187
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
188 189
		goto drop;

190 191 192 193
	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
I
Ian Campbell 已提交
194

195 196 197
	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
198
	if (skb_is_gso(skb))
199
		min_slots_needed++;
I
Ian Campbell 已提交
200

201 202 203 204
	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
205 206 207 208 209
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
		queue->wake_queue.function = xenvif_wake_queue_callback;
		queue->wake_queue.data = (unsigned long)queue;
		xenvif_stop_queue(queue);
		mod_timer(&queue->wake_queue,
210 211
			jiffies + rx_drain_timeout_jiffies);
	}
I
Ian Campbell 已提交
212

213 214
	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
215 216 217 218 219 220 221 222 223 224 225 226

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

I
Ian Campbell 已提交
253 254 255 256 257
	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
258 259 260 261 262 263 264 265 266 267 268 269
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
I
Ian Campbell 已提交
270 271 272 273
}

static void xenvif_down(struct xenvif *vif)
{
274 275 276 277 278 279 280 281 282 283 284 285
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
I
Ian Campbell 已提交
286 287 288 289 290 291 292
}

/* ndo_open: start the interface; only enable the queues when the
 * frontend is connected (carrier on).
 * (Fix: strip non-code blame-viewer artifact lines embedded in the body.)
 */
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

/* ndo_stop: stop the interface; quiesce the queues only if they were
 * running (carrier on).
 * (Fix: strip non-code blame-viewer artifact lines embedded in the body.)
 */
static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* ndo_change_mtu: reject MTUs the frontend cannot receive.  Without
 * scatter-gather the limit is a single Ethernet frame payload.
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max_mtu;

	max_mtu = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
	if (mtu > max_mtu)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

317 318
/* ndo_fix_features: mask out offload features the frontend has not
 * negotiated (SG, TSO/TSO6, IPv4/IPv6 checksum offload).
 * (Fix: strip non-code blame-viewer artifact lines embedded in the body.)
 */
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
342
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
I
Ian Campbell 已提交
343
	},
344 345 346 347 348
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
349
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
350 351 352
	},
	{
		"tx_zerocopy_success",
353
		offsetof(struct xenvif_stats, tx_zerocopy_success),
354 355 356
	},
	{
		"tx_zerocopy_fail",
357
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
358
	},
359 360 361 362 363
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
364
		offsetof(struct xenvif_stats, tx_frag_overflow)
365
	},
I
Ian Campbell 已提交
366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
};

/* ethtool: report how many strings/stats exist in the requested set. */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* ethtool: sum each named counter across all queues into @data.
 *
 * Fixes: strip non-code blame-viewer artifact lines; xenvif_stats[].offset
 * is a BYTE offset (offsetof), but it was being added to a
 * struct xenvif_stats pointer, which scales the offset by
 * sizeof(struct xenvif_stats) and reads the wrong memory.  Do the
 * arithmetic on a char pointer instead.
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	int i;
	unsigned int queue_index;
	struct xenvif_stats *vif_stats;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)
				((char *)vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* ethtool: copy the statistic names into @data for ETH_SS_STATS. */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xenvif_stats[i].name, ETH_GSTRING_LEN);
}

410
static const struct ethtool_ops xenvif_ethtool_ops = {
I
Ian Campbell 已提交
411 412 413 414 415 416 417
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

418
static const struct net_device_ops xenvif_netdev_ops = {
I
Ian Campbell 已提交
419 420 421 422 423
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
424
	.ndo_fix_features = xenvif_fix_features,
425 426
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
427
	.ndo_select_queue = xenvif_select_queue,
I
Ian Campbell 已提交
428 429 430 431 432 433 434 435 436 437 438
};

/* Allocate and register the net_device for a new vif.
 *
 * @parent: sysfs parent device (the xenbus device).
 * @domid:  frontend domain id, @handle: vif index within that domain.
 * Returns the new vif or an ERR_PTR on failure.  Takes a module
 * reference that is dropped in xenvif_free()/xenvif_connect() error path.
 * (Fix: strip non-code blame-viewer artifact lines embedded in the body.)
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_tx_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
			      xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;

	/* Start out with no queues. The call below does not require
	 * rtnl_lock() as it happens before register_netdev().
	 */
	vif->queues = NULL;
	netif_set_real_num_tx_queues(dev, 0);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561
/* One-time initialisation of a queue: credit accounting, skb queues,
 * pending-slot ring, zerocopy callback state, drain timer and NAPI.
 * Returns 0 on success or -ENOMEM if the ballooned pages for grant
 * mappings cannot be reserved.  Undone by xenvif_deinit_queue().
 */
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	/* Start with unlimited credit (~0UL) until xenbus configures it. */
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	/* All MAX_PENDING_REQS slots start out free. */
	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	/* Prepare the zerocopy completion callback for every pending slot
	 * and mark all grant handles invalid.
	 */
	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	/* Drain timer armed from xenvif_start_xmit() when the ring is full. */
	init_timer(&queue->wake_queue);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	return 0;
}

/* Signal that the frontend is connected: clamp the MTU if the frontend
 * cannot do scatter-gather, refresh offload features, raise carrier and
 * enable the queues if the device is already up.  All under rtnl_lock
 * because dev_set_mtu/netdev_update_features require it.
 */
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
562 563
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
I
Ian Campbell 已提交
564
{
565
	struct task_struct *task;
I
Ian Campbell 已提交
566 567
	int err = -ENOMEM;

568 569 570
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);
I
Ian Campbell 已提交
571

572
	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
I
Ian Campbell 已提交
573 574 575
	if (err < 0)
		goto err;

576 577
	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
578

579 580 581
	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
582 583
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
584 585
		if (err < 0)
			goto err_unmap;
586 587
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
588 589
	} else {
		/* feature-split-event-channels == 1 */
590 591
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
592
		err = bind_interdomain_evtchn_to_irqhandler(
593 594
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
595 596
		if (err < 0)
			goto err_unmap;
597 598
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);
599

600 601
		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
602
		err = bind_interdomain_evtchn_to_irqhandler(
603 604
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
605 606
		if (err < 0)
			goto err_tx_unbind;
607 608
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
609
	}
I
Ian Campbell 已提交
610

611
	task = kthread_create(xenvif_kthread_guest_rx,
612
			      (void *)queue, "%s-guest-rx", queue->name);
613
	if (IS_ERR(task)) {
614
		pr_warn("Could not allocate kthread for %s\n", queue->name);
615
		err = PTR_ERR(task);
616 617
		goto err_rx_unbind;
	}
618
	queue->task = task;
619

620
	task = kthread_create(xenvif_dealloc_kthread,
621
			      (void *)queue, "%s-dealloc", queue->name);
622
	if (IS_ERR(task)) {
623
		pr_warn("Could not allocate kthread for %s\n", queue->name);
624 625 626
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
627
	queue->dealloc_task = task;
628

629 630
	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);
631

I
Ian Campbell 已提交
632
	return 0;
633 634

err_rx_unbind:
635 636
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
637
err_tx_unbind:
638 639
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
I
Ian Campbell 已提交
640
err_unmap:
641
	xenvif_unmap_frontend_rings(queue);
I
Ian Campbell 已提交
642
err:
643
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
644 645 646
	return err;
}

647
void xenvif_carrier_off(struct xenvif *vif)
I
Ian Campbell 已提交
648 649
{
	struct net_device *dev = vif->dev;
650 651 652 653 654 655 656 657

	rtnl_lock();
	netif_carrier_off(dev); /* discard queued packets */
	if (netif_running(dev))
		xenvif_down(vif);
	rtnl_unlock();
}

658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
/* Busy-wait until every grant mapping of @queue has been released by the
 * zerocopy dealloc path, complaining (ratelimited) once the wait exceeds
 * @worst_case_skb_lifetime seconds.  Each time a still-granted slot is
 * found, the scan restarts from the beginning (i = -1, bumped to 0 by
 * the loop increment).
 *
 * NOTE(review): schedule_timeout() is called without setting a sleeping
 * task state, so in TASK_RUNNING it returns immediately — this looks
 * like a busy yield loop rather than a 1s sleep; confirm intent.
 */
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
				      unsigned int worst_case_skb_lifetime)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			/* Restart the scan: an earlier slot may have been
			 * re-granted while we waited.
			 */
			i = -1;
		}
	}
}

677 678
void xenvif_disconnect(struct xenvif *vif)
{
679 680 681 682
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

683 684
	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);
I
Ian Campbell 已提交
685

686 687
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
688

689 690 691 692 693
		if (queue->task) {
			del_timer_sync(&queue->wake_queue);
			kthread_stop(queue->task);
			queue->task = NULL;
		}
694

695 696 697 698 699 700 701 702 703 704 705 706 707
		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
708
		}
I
Ian Campbell 已提交
709

710 711
		xenvif_unmap_frontend_rings(queue);
	}
712 713
}

714 715 716 717 718 719 720 721 722 723
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	/* Return the grant-mapping pages reserved in xenvif_init_queue(). */
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
	netif_napi_del(&queue->napi);
}

724 725
void xenvif_free(struct xenvif *vif)
{
726 727 728
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;
729 730
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be an another vif's
731 732 733 734 735 736
	 * internal or QDisc queue. That another vif also has this
	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
	 * internal queue. After that, the QDisc queue can put in worst case
	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
	 * internal queue, so we need several rounds of such timeouts until we
	 * can be sure that no another vif should have skb's from us. We are
737
	 * not sending more skb's, so newly stuck packets are not interesting
738 739 740 741
	 * for us here.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
742

743
	unregister_netdev(vif->dev);
744

745 746 747
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
748
		xenvif_deinit_queue(queue);
749 750 751 752 753 754 755 756
	}

	/* Free the array of queues. The call below does not require
	 * rtnl_lock() because it happens after unregister_netdev().
	 */
	netif_set_real_num_tx_queues(vif->dev, 0);
	vfree(vif->queues);
	vif->queues = NULL;
I
Ian Campbell 已提交
757 758

	free_netdev(vif->dev);
759

760
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
761
}