/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);
}

I
Ian Campbell 已提交
63 64
int xenvif_schedulable(struct xenvif *vif)
{
65 66
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status);
I
Ian Campbell 已提交
67 68
}

69
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
I
Ian Campbell 已提交
70
{
71
	struct xenvif_queue *queue = dev_id;
I
Ian Campbell 已提交
72

73 74
	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);
I
Ian Campbell 已提交
75

76 77 78
	return IRQ_HANDLED;
}

79
int xenvif_poll(struct napi_struct *napi, int budget)
80
{
81 82
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
83 84
	int work_done;

85 86 87 88
	/* This vif is rogue, we pretend we've there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
89
	if (unlikely(queue->vif->disabled)) {
90 91 92 93
		napi_complete(napi);
		return 0;
	}

94
	work_done = xenvif_tx_action(queue, budget);
95 96

	if (work_done < budget) {
97
		napi_complete(napi);
98
		xenvif_napi_schedule_or_enable_events(queue);
99 100 101 102 103
	}

	return work_done;
}

104 105
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
106
	struct xenvif_queue *queue = dev_id;
107 108
	struct netdev_queue *net_queue =
		netdev_get_tx_queue(queue->vif->dev, queue->id);
109

110 111 112 113 114 115 116
	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
	 * the carrier went down and this queue was previously blocked
	 */
	if (unlikely(netif_tx_queue_stopped(net_queue) ||
		     (!netif_carrier_ok(queue->vif->dev) &&
		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
117
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
118 119 120 121

	return IRQ_HANDLED;
}

122
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
123 124 125 126 127 128 129
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

130 131 132 133 134 135 136 137 138 139 140 141 142 143
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

144 145
/* Callback to wake the queue's thread and turn the carrier off on timeout */
static void xenvif_rx_stalled(unsigned long data)
146
{
147 148 149
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	if (xenvif_queue_stopped(queue)) {
150
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
151 152 153
		xenvif_kick_thread(queue);
	}
}
154

I
Ian Campbell 已提交
155 156 157
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
158
	struct xenvif_queue *queue = NULL;
159
	unsigned int num_queues = vif->num_queues;
160
	u16 index;
161
	int min_slots_needed;
I
Ian Campbell 已提交
162 163 164

	BUG_ON(skb->dev != dev);

165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
181
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
182 183
		goto drop;

184 185 186 187
	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
I
Ian Campbell 已提交
188

189 190 191
	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
192
	if (skb_is_gso(skb))
193
		min_slots_needed++;
I
Ian Campbell 已提交
194

195 196 197 198
	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
199
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
200 201
		queue->rx_stalled.function = xenvif_rx_stalled;
		queue->rx_stalled.data = (unsigned long)queue;
202
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
203 204
		mod_timer(&queue->rx_stalled,
			  jiffies + rx_drain_timeout_jiffies);
205
	}
I
Ian Campbell 已提交
206

207 208
	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
209 210 211 212 213 214 215 216 217 218 219 220

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
221
	struct xenvif_queue *queue = NULL;
222
	unsigned int num_queues = vif->num_queues;
223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

I
Ian Campbell 已提交
247 248 249 250 251
	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
252
	struct xenvif_queue *queue = NULL;
253
	unsigned int num_queues = vif->num_queues;
254 255 256 257 258 259 260 261 262 263
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
I
Ian Campbell 已提交
264 265 266 267
}

static void xenvif_down(struct xenvif *vif)
{
268
	struct xenvif_queue *queue = NULL;
269
	unsigned int num_queues = vif->num_queues;
270 271 272 273 274 275 276 277 278 279
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
I
Ian Campbell 已提交
280 281 282 283 284
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
285
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
286
		xenvif_up(vif);
287
	netif_tx_start_all_queues(dev);
I
Ian Campbell 已提交
288 289 290 291 292 293
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
294
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
295
		xenvif_down(vif);
296
	netif_tx_stop_all_queues(dev);
I
Ian Campbell 已提交
297 298 299 300 301 302 303 304 305 306 307 308 309 310
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

311 312
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
I
Ian Campbell 已提交
313 314 315
{
	struct xenvif *vif = netdev_priv(dev);

316 317
	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
318
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
319
		features &= ~NETIF_F_TSO;
320 321
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
322
	if (!vif->ip_csum)
323
		features &= ~NETIF_F_IP_CSUM;
324 325
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;
I
Ian Campbell 已提交
326

327
	return features;
I
Ian Campbell 已提交
328 329 330 331 332 333 334 335
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
336
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
I
Ian Campbell 已提交
337
	},
338 339 340 341 342
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
343
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
344 345 346
	},
	{
		"tx_zerocopy_success",
347
		offsetof(struct xenvif_stats, tx_zerocopy_success),
348 349 350
	},
	{
		"tx_zerocopy_fail",
351
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
352
	},
353 354 355 356 357
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
358
		offsetof(struct xenvif_stats, tx_frag_overflow)
359
	},
I
Ian Campbell 已提交
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
375
	struct xenvif *vif = netdev_priv(dev);
376
	unsigned int num_queues = vif->num_queues;
I
Ian Campbell 已提交
377
	int i;
378 379 380 381 382 383 384 385 386 387 388
	unsigned int queue_index;
	struct xenvif_stats *vif_stats;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
I
Ian Campbell 已提交
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

404
static const struct ethtool_ops xenvif_ethtool_ops = {
I
Ian Campbell 已提交
405 406 407 408 409 410 411
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

412
static const struct net_device_ops xenvif_netdev_ops = {
I
Ian Campbell 已提交
413 414 415 416 417
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
418
	.ndo_fix_features = xenvif_fix_features,
419 420
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
I
Ian Campbell 已提交
421 422 423 424 425 426 427 428 429 430 431
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
432 433
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
434
	 * via netif_set_real_num_*_queues().
435
	 */
436 437
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
I
Ian Campbell 已提交
438
	if (dev == NULL) {
439
		pr_warn("Could not allocate netdev for %s\n", name);
I
Ian Campbell 已提交
440 441 442 443 444 445
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);
446

I
Ian Campbell 已提交
447 448 449
	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
450
	vif->ip_csum = 1;
I
Ian Campbell 已提交
451
	vif->dev = dev;
452 453
	vif->disabled = false;

454
	/* Start out with no queues. */
455
	vif->queues = NULL;
456
	vif->num_queues = 0;
457

I
Ian Campbell 已提交
458
	dev->netdev_ops	= &xenvif_netdev_ops;
459 460
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
461
		NETIF_F_TSO | NETIF_F_TSO6;
462
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
463
	dev->ethtool_ops = &xenvif_ethtool_ops;
I
Ian Campbell 已提交
464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");
486 487 488

	__module_get(THIS_MODULE);

I
Ian Campbell 已提交
489 490 491
	return vif;
}

492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

532
	init_timer(&queue->rx_stalled);
533 534 535 536 537 538 539 540 541 542

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
543
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
544 545 546 547 548 549 550
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
551 552
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
I
Ian Campbell 已提交
553
{
554
	struct task_struct *task;
I
Ian Campbell 已提交
555 556
	int err = -ENOMEM;

557 558 559
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);
I
Ian Campbell 已提交
560

561
	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
I
Ian Campbell 已提交
562 563 564
	if (err < 0)
		goto err;

565 566
	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
567
	atomic_set(&queue->inflight_packets, 0);
568

569 570 571
	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

572 573 574
	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
575 576
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
577 578
		if (err < 0)
			goto err_unmap;
579 580
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
581 582
	} else {
		/* feature-split-event-channels == 1 */
583 584
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
585
		err = bind_interdomain_evtchn_to_irqhandler(
586 587
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
588 589
		if (err < 0)
			goto err_unmap;
590 591
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);
592

593 594
		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
595
		err = bind_interdomain_evtchn_to_irqhandler(
596 597
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
598 599
		if (err < 0)
			goto err_tx_unbind;
600 601
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
602
	}
I
Ian Campbell 已提交
603

604
	task = kthread_create(xenvif_kthread_guest_rx,
605
			      (void *)queue, "%s-guest-rx", queue->name);
606
	if (IS_ERR(task)) {
607
		pr_warn("Could not allocate kthread for %s\n", queue->name);
608
		err = PTR_ERR(task);
609 610
		goto err_rx_unbind;
	}
611
	queue->task = task;
612

613
	task = kthread_create(xenvif_dealloc_kthread,
614
			      (void *)queue, "%s-dealloc", queue->name);
615
	if (IS_ERR(task)) {
616
		pr_warn("Could not allocate kthread for %s\n", queue->name);
617 618 619
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
620
	queue->dealloc_task = task;
621

622 623
	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);
624

I
Ian Campbell 已提交
625
	return 0;
626 627

err_rx_unbind:
628 629
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
630
err_tx_unbind:
631 632
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
I
Ian Campbell 已提交
633
err_unmap:
634
	xenvif_unmap_frontend_rings(queue);
I
Ian Campbell 已提交
635
err:
636
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
637 638 639
	return err;
}

640
void xenvif_carrier_off(struct xenvif *vif)
I
Ian Campbell 已提交
641 642
{
	struct net_device *dev = vif->dev;
643 644

	rtnl_lock();
645 646 647 648 649
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
650 651 652 653 654
	rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
655
	struct xenvif_queue *queue = NULL;
656
	unsigned int num_queues = vif->num_queues;
657 658
	unsigned int queue_index;

659
	xenvif_carrier_off(vif);
I
Ian Campbell 已提交
660

661 662
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
663

664 665
		netif_napi_del(&queue->napi);

666
		if (queue->task) {
667
			del_timer_sync(&queue->rx_stalled);
668 669 670
			kthread_stop(queue->task);
			queue->task = NULL;
		}
671

672 673 674 675 676 677 678 679 680 681 682 683 684
		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
685
		}
I
Ian Campbell 已提交
686

687 688
		xenvif_unmap_frontend_rings(queue);
	}
689 690
}

691 692 693 694 695 696 697 698 699
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

700 701
void xenvif_free(struct xenvif *vif)
{
702
	struct xenvif_queue *queue = NULL;
703
	unsigned int num_queues = vif->num_queues;
704
	unsigned int queue_index;
705

706
	unregister_netdev(vif->dev);
707

708 709
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
710
		xenvif_deinit_queue(queue);
711 712 713 714
	}

	vfree(vif->queues);
	vif->queues = NULL;
715
	vif->num_queues = 0;
I
Ian Campbell 已提交
716 717

	free_netdev(vif->dev);
718

719
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
720
}