/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

46 47 48 49 50 51 52 53 54 55
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	if (!queue->vif->can_queue)
		return;

	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

I
Ian Campbell 已提交
56 57
int xenvif_schedulable(struct xenvif *vif)
{
58 59
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status);
I
Ian Campbell 已提交
60 61
}

62
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
I
Ian Campbell 已提交
63
{
64
	struct xenvif_queue *queue = dev_id;
I
Ian Campbell 已提交
65

66 67
	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);
I
Ian Campbell 已提交
68

69 70 71
	return IRQ_HANDLED;
}

72
int xenvif_poll(struct napi_struct *napi, int budget)
73
{
74 75
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
76 77
	int work_done;

78 79 80 81
	/* This vif is rogue, we pretend we've there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
82
	if (unlikely(queue->vif->disabled)) {
83 84 85 86
		napi_complete(napi);
		return 0;
	}

87
	work_done = xenvif_tx_action(queue, budget);
88 89

	if (work_done < budget) {
90
		napi_complete(napi);
91
		xenvif_napi_schedule_or_enable_events(queue);
92 93 94 95 96
	}

	return work_done;
}

97 98
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
99
	struct xenvif_queue *queue = dev_id;
100

101
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
102 103 104 105

	return IRQ_HANDLED;
}

/* Combined TX+RX handler, bound when the frontend did not negotiate
 * feature-split-event-channels and both directions share one event
 * channel: service both sides unconditionally.
 */
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

/* Restart the netdev TX queue backing @queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev,
						queue->id));
}

/* Callback to wake the queue and drain it on timeout */
static void xenvif_wake_queue_callback(unsigned long data)
130
{
131 132 133 134 135 136 137 138 139
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	if (xenvif_queue_stopped(queue)) {
		netdev_err(queue->vif->dev, "draining TX queue\n");
		queue->rx_queue_purge = true;
		xenvif_kick_thread(queue);
		xenvif_wake_queue(queue);
	}
}
140

I
Ian Campbell 已提交
141 142 143
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
144
	struct xenvif_queue *queue = NULL;
145
	unsigned int num_queues = vif->num_queues;
146
	u16 index;
147
	int min_slots_needed;
I
Ian Campbell 已提交
148 149 150

	BUG_ON(skb->dev != dev);

151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
167
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
168 169
		goto drop;

170 171 172 173
	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
I
Ian Campbell 已提交
174

175 176 177
	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
178
	if (skb_is_gso(skb))
179
		min_slots_needed++;
I
Ian Campbell 已提交
180

181 182 183 184
	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
185 186 187 188 189
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
		queue->wake_queue.function = xenvif_wake_queue_callback;
		queue->wake_queue.data = (unsigned long)queue;
		xenvif_stop_queue(queue);
		mod_timer(&queue->wake_queue,
190 191
			jiffies + rx_drain_timeout_jiffies);
	}
I
Ian Campbell 已提交
192

193 194
	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
195 196 197 198 199 200 201 202 203 204 205 206

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
207
	struct xenvif_queue *queue = NULL;
208
	unsigned int num_queues = vif->num_queues;
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

I
Ian Campbell 已提交
233 234 235 236 237
	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
238
	struct xenvif_queue *queue = NULL;
239
	unsigned int num_queues = vif->num_queues;
240 241 242 243 244 245 246 247 248 249
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
I
Ian Campbell 已提交
250 251 252 253
}

static void xenvif_down(struct xenvif *vif)
{
254
	struct xenvif_queue *queue = NULL;
255
	unsigned int num_queues = vif->num_queues;
256 257 258 259 260 261 262 263 264 265
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
I
Ian Campbell 已提交
266 267 268 269 270
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
271
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
272
		xenvif_up(vif);
273
	netif_tx_start_all_queues(dev);
I
Ian Campbell 已提交
274 275 276 277 278 279
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
280
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
281
		xenvif_down(vif);
282
	netif_tx_stop_all_queues(dev);
I
Ian Campbell 已提交
283 284 285 286 287 288 289 290 291 292 293 294 295 296
	return 0;
}

/* ndo_change_mtu: cap the MTU at what the frontend can receive.
 * Without scatter-gather a frame must fit one slot, so the standard
 * Ethernet payload size is the limit.
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max_mtu = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max_mtu)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

297 298
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
I
Ian Campbell 已提交
299 300 301
{
	struct xenvif *vif = netdev_priv(dev);

302 303
	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
304
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
305
		features &= ~NETIF_F_TSO;
306 307
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
308
	if (!vif->ip_csum)
309
		features &= ~NETIF_F_IP_CSUM;
310 311
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;
I
Ian Campbell 已提交
312

313
	return features;
I
Ian Campbell 已提交
314 315 316 317 318 319 320 321
}

/* Table of extra per-vif ethtool statistics.  @offset is the byte
 * offset of the counter within struct xenvif_stats; each counter is
 * summed across all queues in xenvif_get_ethtool_stats().
 */
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

/* ethtool: report how many stat strings this driver exposes. */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* ethtool: sum each named counter over every queue.
 *
 * Fix: the original computed "vif_stats + xenvif_stats[i].offset" on a
 * struct xenvif_stats pointer, i.e. pointer arithmetic scaled by
 * sizeof(struct xenvif_stats), so every entry after the first read far
 * past the intended field.  The offsetof() value is a byte offset and
 * must be applied to a byte (void) pointer.
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats +
						    xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* ethtool: copy out the stat names declared in xenvif_stats[]. */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xenvif_stats[i].name, ETH_GSTRING_LEN);
}

/* ethtool entry points: link state plus the custom per-vif stats. */
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

/* net_device callbacks for a xen-netback vif; MAC address handling is
 * delegated to the generic Ethernet helpers.
 */
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

/* Allocate and register the net_device for a new vif named
 * "vif<domid>.<handle>".
 *
 * @parent: backend device the netdev is parented to in sysfs.
 * @domid:  frontend domain id.
 * @handle: per-domain vif index.
 *
 * Returns the new vif, or an ERR_PTR() on failure.  Queues are NOT
 * allocated here; vif->queues stays NULL until the frontend negotiates
 * a queue count.
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	/* Carrier stays off until the frontend connects. */
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	/* Pin the module while the vif exists; dropped in xenvif_free(). */
	__module_get(THIS_MODULE);

	return vif;
}

/* Per-queue initialisation: credit state, skb queues, the
 * pending-request ring and the ballooned pages used for grant mapping.
 * Returns 0 or -ENOMEM.  Reversed by xenvif_deinit_queue().
 */
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	/* Start with effectively unlimited bandwidth credit. */
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	/* All MAX_PENDING_REQS slots start out free. */
	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	/* Pre-fill the zerocopy callback descriptors; .desc records the
	 * slot index so the callback can locate its pending_tx_info.
	 */
	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	init_timer(&queue->wake_queue);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	return 0;
}

/* Called once the frontend is connected: clamp the MTU when the
 * frontend cannot do scatter-gather, refresh offload features, mark
 * the vif connected and raise carrier.  Runs under rtnl_lock because
 * it modifies netdev state (MTU/features).
 */
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

/* Connect a queue to the frontend: map the shared TX/RX rings, bind
 * the event channel(s) and start the guest-RX and dealloc kthreads.
 *
 * With feature-split-event-channels the frontend supplies distinct
 * tx/rx event channels and each gets its own handler; otherwise one
 * combined handler serves both.  IRQs are left disabled here —
 * xenvif_up() enables them.
 *
 * Returns 0 on success or a negative errno, unwinding partial setup.
 */
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	/* Must not already be connected. */
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	/* Both kthreads are created before either is woken. */
	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(queue);
err:
	/* NOTE(review): no matching module_get() in this function —
	 * presumably this balances the __module_get() in xenvif_alloc()
	 * on a failed connect; confirm against the xenbus callers.
	 */
	module_put(THIS_MODULE);
	return err;
}

/* Drop carrier and, if we were connected, take the queues down.
 * Safe to call repeatedly: the CONNECTED bit is test-and-cleared, so
 * the teardown body runs at most once per connection.
 */
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

/* Wait (sleeping in ~1s steps) until every grant of @queue has been
 * unmapped.  "i = -1" restarts the scan from slot 0 after each sleep,
 * so we only return once a full pass finds no granted slot.  After
 * more than @worst_case_skb_lifetime sleeps a rate-limited error is
 * logged for each slot still granted.
 */
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
				      unsigned int worst_case_skb_lifetime)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			i = -1;
		}
	}
}

/* Tear down the runtime state of every queue: stop the kthreads,
 * unbind the event-channel IRQ(s) and unmap the shared rings.  The
 * reverse of xenvif_connect(); the netdev itself stays registered
 * until xenvif_free().
 */
void xenvif_disconnect(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		if (queue->task) {
			/* Cancel the drain timer before stopping the
			 * thread it would kick.
			 */
			del_timer_sync(&queue->wake_queue);
			kthread_stop(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			/* tx_irq == rx_irq means a single shared event
			 * channel: unbind it only once.
			 */
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_rings(queue);
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	/* Return the ballooned pages reserved for grant mappings and
	 * remove this queue's NAPI instance.
	 */
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
	netif_napi_del(&queue->napi);
}

/* Final teardown: unregister the netdev, wait for every granted page
 * of every queue to come back, then release the queues, the netdev
 * and the module reference taken in xenvif_alloc().
 */
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be an another vif's
	 * internal or QDisc queue. That another vif also has this
	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
	 * internal queue. After that, the QDisc queue can put in worst case
	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
	 * internal queue, so we need several rounds of such timeouts until we
	 * can be sure that no another vif should have skb's from us. We are
	 * not sending more skb's, so newly stuck packets are not interesting
	 * for us here.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

	unregister_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
		xenvif_deinit_queue(queue);
	}

	vfree(vif->queues);
	vif->queues = NULL;
	vif->num_queues = 0;

	free_netdev(vif->dev);

	/* Balances the __module_get() in xenvif_alloc(). */
	module_put(THIS_MODULE);
}