interface.c 19.8 KB
Newer Older
I
Ian Campbell 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

33
#include <linux/kthread.h>
I
Ian Campbell 已提交
34 35 36
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
37
#include <linux/vmalloc.h>
I
Ian Campbell 已提交
38 39 40

#include <xen/events.h>
#include <asm/xen/hypercall.h>
41
#include <xen/balloon.h>
I
Ian Campbell 已提交
42 43

#define XENVIF_QUEUE_LENGTH 32
44
#define XENVIF_NAPI_WEIGHT  64
I
Ian Campbell 已提交
45

46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	/* Balanced by atomic_dec() in xenvif_skb_zerocopy_complete(). */
	atomic_inc(&queue->inflight_packets);
}

/* Undo the inflight accounting done by xenvif_skb_zerocopy_prepare();
 * reached via xenvif_zerocopy_callback (see comment on the prepare
 * function above).
 */
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);
}

63 64 65 66 67 68 69 70 71 72
/* Stop this queue's netdev TX queue, but only if the vif requested
 * queueing (can_queue); otherwise leave the queue running.
 */
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	if (!queue->vif->can_queue)
		return;

	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

I
Ian Campbell 已提交
73 74
/* A vif may carry traffic only while its netdev is administratively up
 * and the backend has reached the connected state.
 */
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status);
}

79
/* TX event-channel interrupt: if the frontend has placed unconsumed
 * requests on the TX ring, schedule NAPI to process them.
 */
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

89
/* NAPI poll handler: process up to @budget TX requests from the
 * frontend.  Completing with work_done < budget ends the NAPI round
 * and re-arms events via xenvif_napi_schedule_or_enable_events().
 */
int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

114 115
/* RX event-channel interrupt: note a purge event when the queue looks
 * stalled, then kick the queue's kthread to do the actual work.
 */
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	struct netdev_queue *net_queue =
		netdev_get_tx_queue(queue->vif->dev, queue->id);

	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
	 * the carrier went down and this queue was previously blocked
	 */
	if (unlikely(netif_tx_queue_stopped(net_queue) ||
		     (!netif_carrier_ok(queue->vif->dev) &&
		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

132
/* Combined handler used when the frontend does not support split
 * TX/RX event channels (see xenvif_connect()): service both paths.
 */
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

140 141 142 143 144 145 146 147 148 149 150 151 152 153
/* Return non-zero if this queue's netdev TX queue is currently
 * stopped.
 */
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(queue->vif->dev,
							  queue->id));
}

/* Restart this queue's netdev TX queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
}

154 155
/* Callback to wake the queue's thread and turn the carrier off on timeout */
static void xenvif_rx_stalled(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	/* Only act if the queue is still stopped; otherwise the stall
	 * resolved itself before the timer fired.
	 */
	if (xenvif_queue_stopped(queue)) {
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
		xenvif_kick_thread(queue);
	}
}
164

I
Ian Campbell 已提交
165 166 167
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
168
	struct xenvif_queue *queue = NULL;
169
	unsigned int num_queues = vif->num_queues;
170
	u16 index;
171
	int min_slots_needed;
I
Ian Campbell 已提交
172 173 174

	BUG_ON(skb->dev != dev);

175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
191
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
192 193
		goto drop;

194 195 196 197
	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
I
Ian Campbell 已提交
198

199 200 201
	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
202
	if (skb_is_gso(skb))
203
		min_slots_needed++;
I
Ian Campbell 已提交
204

205 206 207 208
	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
209
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
210 211
		queue->rx_stalled.function = xenvif_rx_stalled;
		queue->rx_stalled.data = (unsigned long)queue;
212
		xenvif_stop_queue(queue);
213 214
		mod_timer(&queue->rx_stalled,
			  jiffies + rx_drain_timeout_jiffies);
215
	}
I
Ian Campbell 已提交
216

217 218
	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
219 220 221 222 223 224 225 226 227 228 229 230

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* ndo_get_stats: refresh the netdev stats by summing the per-queue
 * counters.  Reports zeros while no queues are allocated.
 */
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

/* Bring every queue's datapath online: enable NAPI and the TX/RX
 * interrupts, then poke NAPI in case requests accumulated while the
 * interrupts were disabled.
 */
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		/* tx_irq == rx_irq when a single event channel is used */
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

/* Take every queue's datapath offline: disable NAPI, mask the TX/RX
 * interrupts and cancel any pending credit timer.
 */
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		/* tx_irq == rx_irq when a single event channel is used */
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
}

/* ndo_open: start the interface; the datapath is only brought up if
 * the backend is already connected to the frontend.
 */
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

/* ndo_stop: stop the interface; the datapath is only torn down if the
 * backend is currently connected.
 */
static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* ndo_change_mtu: accept any MTU up to the limit imposed by the vif's
 * scatter-gather capability (65535 - VLAN_ETH_HLEN with SG, otherwise
 * the standard Ethernet payload size).
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max;

	if (vif->can_sg)
		max = 65535 - VLAN_ETH_HLEN;
	else
		max = ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

321 322
/* ndo_fix_features: mask out offload features the frontend has not
 * negotiated (scatter-gather, TSO/TSO6 via the GSO masks, and IPv4/v6
 * checksum offload).
 */
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

/* ethtool statistics table: maps a display name to the byte offset
 * (from offsetof()) of the counter within struct xenvif_stats.  The
 * counters are summed over all queues in xenvif_get_ethtool_stats().
 */
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

/* ethtool: number of entries in the requested string set; only the
 * statistics set is supported.
 */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* ethtool: report each xenvif_stats counter summed over all queues.
 *
 * The offsets in xenvif_stats[] are byte offsets produced by
 * offsetof(), so the per-queue stats base must be a raw byte pointer.
 * The previous code added the offset to a struct xenvif_stats *,
 * which scaled it by sizeof(struct xenvif_stats) and read far outside
 * the counters for every entry but the first.
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			/* void * so the offset is applied in bytes */
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats +
						    xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* ethtool: copy the statistic names into @data for ETH_SS_STATS;
 * other string sets are ignored.
 */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xenvif_stats[i].name, ETH_GSTRING_LEN);
}

414
/* ethtool operations exposed by the vif netdev. */
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

422
/* Netdev operations implemented by the vif. */
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

/* Allocate and register the netdev for a new vif.
 *
 * @parent: backend device, used for sysfs linkage
 * @domid:  frontend domain id
 * @handle: per-domain vif handle; the interface is named
 *          "vif<domid>.<handle>"
 *
 * Returns the new vif or an ERR_PTR() on failure.  Queues are not
 * allocated here (vif->queues starts NULL); takes a module reference
 * that is dropped in xenvif_free().
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	/* Dropped by xenvif_free() (or the error path of connect()). */
	__module_get(THIS_MODULE);

	return vif;
}

502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541
/* Initialise the software state of one queue: credit accounting,
 * skb lists, the pending ring, locks, the ballooned pages used for
 * grant mappings, and the zerocopy callback structs.
 *
 * Returns 0 on success or -ENOMEM if the ballooned pages cannot be
 * reserved.  Undone by xenvif_deinit_queue().
 */
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	/* ~0UL: start with effectively unlimited TX credit */
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	/* Armed on demand in xenvif_start_xmit() when the ring stalls */
	init_timer(&queue->rx_stalled);

	return 0;
}

/* Mark the vif connected and turn the carrier on, first clamping the
 * MTU if the frontend cannot handle scatter-gather frames.
 */
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

/* Connect one queue to the frontend: map the shared TX/RX rings, bind
 * the event channel(s), start the guest-RX and dealloc kthreads and
 * register the NAPI instance.
 *
 * @tx_ring_ref/@rx_ring_ref: grant references of the shared rings
 * @tx_evtchn/@rx_evtchn: event channel ports; equal when the frontend
 *                        does not use feature-split-event-channels
 *
 * Returns 0 on success or a negative errno, unwinding the resources
 * acquired so far.
 */
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	/* Connecting an already-connected queue is a bug in the caller */
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		/* NOTE(review): queue->task is not stopped on this path;
		 * presumably the caller tears it down via
		 * xenvif_disconnect() -- worth confirming.
		 */
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	return 0;

	/* NOTE(review): with a single event channel (tx_irq == rx_irq),
	 * falling from err_rx_unbind into err_tx_unbind unbinds the same
	 * handler twice -- confirm this is benign.
	 */
err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(queue);
err:
	module_put(THIS_MODULE);
	return err;
}

650
/* Turn the carrier off and stop the datapath if the vif was connected.
 * test_and_clear_bit() makes this safe against a second teardown call.
 */
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681
/* Wait until every grant-mapped TX page of the queue has been
 * unmapped.  On finding a still-granted slot the whole pending ring is
 * re-scanned (i is reset to -1, so ++i restarts at 0) after waiting;
 * once the wait exceeds @worst_case_skb_lifetime seconds a
 * rate-limited error names the offending index.  Never gives up.
 *
 * NOTE(review): schedule_timeout() is called without setting the task
 * state, so it returns immediately while TASK_RUNNING -- presumably an
 * intentional yield/busy-wait, but worth confirming.
 */
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
				      unsigned int worst_case_skb_lifetime)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			i = -1;
		}
	}
}

682 683
/* Tear down the datapath of every queue: drop the carrier, stop the
 * kthreads, unbind the event channel irq(s) and unmap the shared
 * rings.  The reverse of xenvif_connect(), applied to all queues.
 */
void xenvif_disconnect(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			/* Kill the stall timer before the thread it kicks */
			del_timer_sync(&queue->rx_stalled);
			kthread_stop(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_rings(queue);
	}
}

720 721 722 723 724 725 726 727 728
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 * Returns the ballooned pages reserved for grant mappings.
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

729 730
/* Final teardown of a vif: unregister the netdev, wait for every
 * granted TX page to come back, release per-queue resources, free the
 * queue array and the netdev, and drop the module reference taken in
 * xenvif_alloc().
 */
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be an another vif's
	 * internal or QDisc queue. That another vif also has this
	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
	 * Although if that other guest wakes up just before its timeout happens
	 * and takes only one skb from QDisc, it can hold onto other skbs for a
	 * longer period.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);

	unregister_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
		xenvif_deinit_queue(queue);
	}

	vfree(vif->queues);
	vif->queues = NULL;
	vif->num_queues = 0;

	free_netdev(vif->dev);

	module_put(THIS_MODULE);
}