interface.c 17.5 KB
Newer Older
I
Ian Campbell 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

33
#include <linux/kthread.h>
I
Ian Campbell 已提交
34 35 36
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
37
#include <linux/vmalloc.h>
I
Ian Campbell 已提交
38 39 40

#include <xen/events.h>
#include <asm/xen/hypercall.h>
41
#include <xen/balloon.h>
I
Ian Campbell 已提交
42 43

#define XENVIF_QUEUE_LENGTH 32
44
#define XENVIF_NAPI_WEIGHT  64
I
Ian Campbell 已提交
45

46 47 48
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	/* Mark the skb so the core stack invokes the zerocopy callback
	 * once all references to the frag pages are dropped.
	 */
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	/* Balanced by xenvif_skb_zerocopy_complete(). */
	atomic_inc(&queue->inflight_packets);
}

/* Drop the inflight reference taken by xenvif_skb_zerocopy_prepare(). */
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);
}

I
Ian Campbell 已提交
66 67
int xenvif_schedulable(struct xenvif *vif)
{
68
	return netif_running(vif->dev) &&
69 70
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
I
Ian Campbell 已提交
71 72
}

73
/* TX event-channel interrupt: defer the actual work to NAPI context,
 * but only schedule the poll when the frontend has posted new requests.
 */
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

83
int xenvif_poll(struct napi_struct *napi, int budget)
84
{
85 86
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
87 88
	int work_done;

89 90 91 92
	/* This vif is rogue, we pretend we've there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
93
	if (unlikely(queue->vif->disabled)) {
94 95 96 97
		napi_complete(napi);
		return 0;
	}

98
	work_done = xenvif_tx_action(queue, budget);
99 100

	if (work_done < budget) {
101
		napi_complete(napi);
102
		xenvif_napi_schedule_or_enable_events(queue);
103 104 105 106 107
	}

	return work_done;
}

108 109
/* RX event-channel interrupt: wake the per-queue guest-RX kthread,
 * which performs the actual transfer into the frontend's RX ring.
 */
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

117
/* Combined interrupt handler, used when the frontend shares a single
 * event channel for TX and RX (feature-split-event-channels == 0).
 */
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

125 126 127 128 129 130 131 132 133 134 135 136 137 138
/* Report whether the netdev TX subqueue backing @queue is stopped. */
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue->id));
}

/* Restart the netdev TX subqueue backing @queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

I
Ian Campbell 已提交
139 140 141
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
142
	struct xenvif_queue *queue = NULL;
143
	unsigned int num_queues = vif->num_queues;
144
	u16 index;
145
	struct xenvif_rx_cb *cb;
I
Ian Campbell 已提交
146 147 148

	BUG_ON(skb->dev != dev);

149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
165
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
166 167
		goto drop;

168
	cb = XENVIF_RX_CB(skb);
169
	cb->expires = jiffies + vif->drain_timeout;
I
Ian Campbell 已提交
170

171
	xenvif_rx_queue_tail(queue, skb);
172
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
173 174 175 176 177 178 179 180 181 182 183 184

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
185
	struct xenvif_queue *queue = NULL;
186
	unsigned int num_queues = vif->num_queues;
187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

I
Ian Campbell 已提交
211 212 213 214 215
	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
216
	struct xenvif_queue *queue = NULL;
217
	unsigned int num_queues = vif->num_queues;
218 219 220 221 222 223 224 225 226 227
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
I
Ian Campbell 已提交
228 229 230 231
}

static void xenvif_down(struct xenvif *vif)
{
232
	struct xenvif_queue *queue = NULL;
233
	unsigned int num_queues = vif->num_queues;
234 235 236 237 238 239 240
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
241
		napi_disable(&queue->napi);
242 243
		del_timer_sync(&queue->credit_timeout);
	}
I
Ian Campbell 已提交
244 245 246 247 248
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
249
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
250
		xenvif_up(vif);
251
	netif_tx_start_all_queues(dev);
I
Ian Campbell 已提交
252 253 254 255 256 257
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
258
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
259
		xenvif_down(vif);
260
	netif_tx_stop_all_queues(dev);
I
Ian Campbell 已提交
261 262 263 264 265 266 267 268 269 270 271 272 273 274
	return 0;
}

/* ndo_change_mtu: cap the MTU at what the frontend can receive —
 * 64KiB minus headers with scatter-gather, the Ethernet default
 * otherwise.
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max_mtu = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max_mtu)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

275 276
/* ndo_fix_features: strip any offload feature the frontend has not
 * negotiated support for.
 */
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (!((vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4)))
		features &= ~NETIF_F_TSO;
	if (!((vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6)))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

/* Table mapping ethtool statistic names to their byte offset within
 * struct xenvif_stats; per-queue values are summed at read time by
 * xenvif_get_ethtool_stats().
 */
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;	/* byte offset from offsetof() */
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

/* ethtool get_sset_count: only the statistics string set is supported. */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* ethtool get_ethtool_stats: sum each named counter across all queues.
 *
 * Fix: xenvif_stats[].offset is a BYTE offset (from offsetof()), but
 * the original added it to a 'struct xenvif_stats *', so the offset
 * was scaled by sizeof(struct xenvif_stats) and the reads landed far
 * outside the stats structure. Do the arithmetic on a void * instead
 * (byte-granular under the kernel's GNU C dialect).
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats +
						    xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* ethtool get_strings: emit the statistic names for ETH_SS_STATS. */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	unsigned int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xenvif_stats[i].name, ETH_GSTRING_LEN);
}

368
/* ethtool callbacks: link state plus the custom statistics above. */
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

376
/* Netdev callbacks for the backend interface; MAC handling uses the
 * generic ethernet helpers.
 */
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

/* Allocate and register the netdev for a vif identified by (domid,
 * handle). Returns the new vif, or an ERR_PTR() on failure. Queues
 * are not allocated here; they appear later when the frontend
 * negotiates them.
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);

	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	/* Identity and negotiated-capability defaults. */
	vif = netdev_priv(dev);
	vif->domid  = domid;
	vif->handle = handle;
	vif->dev = dev;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->ethtool_ops = &xenvif_ethtool_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	/* Carrier stays off until the frontend connects. */
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	/* Dropped again in xenvif_free(). */
	__module_get(THIS_MODULE);

	return vif;
}

460 461 462 463 464 465 466 467 468
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

469 470
	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510
	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

/* Mark the vif as connected once the frontend has come up. Runs under
 * rtnl_lock because it may change the MTU and the feature set.
 */
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	/* Without scatter-gather the guest cannot take >1500-byte frames. */
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	/* Re-evaluate offloads against what the frontend negotiated. */
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

/* Connect one queue to the frontend: map the shared TX/RX rings, bind
 * the event channel(s), and start the guest-RX and dealloc kthreads.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto chain.
 */
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	/* Must not be called on an already-connected queue. */
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		/* Stays masked until xenvif_up(). */
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		/* NOTE(review): this path does not stop/clear queue->task
		 * created just above — verify the caller's teardown
		 * (xenvif_disconnect) is always reached to reap it.
		 */
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	/* Both threads were created stopped; start them together. */
	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	/* NOTE(review): with a shared event channel tx_irq == rx_irq, so
	 * this unwind appears to unbind the same irq twice — confirm
	 * against the irq core's handling of a second unbind.
	 */
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(queue);
err:
	module_put(THIS_MODULE);
	return err;
}

609
/* Mark the vif as disconnected. Safe to call more than once: the
 * test_and_clear_bit makes the teardown run only on the first call.
 */
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
624
	struct xenvif_queue *queue = NULL;
625
	unsigned int num_queues = vif->num_queues;
626 627
	unsigned int queue_index;

628
	xenvif_carrier_off(vif);
I
Ian Campbell 已提交
629

630 631
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
632

633 634
		netif_napi_del(&queue->napi);

635 636 637 638
		if (queue->task) {
			kthread_stop(queue->task);
			queue->task = NULL;
		}
639

640 641 642 643 644 645 646 647 648 649 650 651 652
		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
653
		}
I
Ian Campbell 已提交
654

655 656
		xenvif_unmap_frontend_rings(queue);
	}
657 658
}

659 660 661 662 663 664 665 666 667
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	/* Return the grant-mapping page pool to the balloon. */
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

668 669
void xenvif_free(struct xenvif *vif)
{
670
	struct xenvif_queue *queue = NULL;
671
	unsigned int num_queues = vif->num_queues;
672
	unsigned int queue_index;
673

674
	unregister_netdev(vif->dev);
675

676 677
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
678
		xenvif_deinit_queue(queue);
679 680 681 682
	}

	vfree(vif->queues);
	vif->queues = NULL;
683
	vif->num_queues = 0;
I
Ian Campbell 已提交
684 685

	free_netdev(vif->dev);
686

687
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
688
}