interface.c 19.2 KB
Newer Older
I
Ian Campbell 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

33
#include <linux/kthread.h>
I
Ian Campbell 已提交
34 35 36
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
37
#include <linux/vmalloc.h>
I
Ian Campbell 已提交
38 39 40

#include <xen/events.h>
#include <asm/xen/hypercall.h>
41
#include <xen/balloon.h>
I
Ian Campbell 已提交
42 43

#define XENVIF_QUEUE_LENGTH 32
44
#define XENVIF_NAPI_WEIGHT  64
I
Ian Campbell 已提交
45

46 47 48 49 50 51 52 53 54 55
/* Stop the netdev TX queue backing @queue — but only when the frontend
 * negotiated real queueing (vif->can_queue); otherwise excess packets
 * are simply dropped and the qdisc queue must stay running.
 */
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	if (queue->vif->can_queue)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

I
Ian Campbell 已提交
56 57
int xenvif_schedulable(struct xenvif *vif)
{
58 59
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status);
I
Ian Campbell 已提交
60 61
}

62
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
I
Ian Campbell 已提交
63
{
64
	struct xenvif_queue *queue = dev_id;
I
Ian Campbell 已提交
65

66 67
	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);
I
Ian Campbell 已提交
68

69 70 71
	return IRQ_HANDLED;
}

72
int xenvif_poll(struct napi_struct *napi, int budget)
73
{
74 75
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
76 77
	int work_done;

78 79 80 81
	/* This vif is rogue, we pretend we've there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
82
	if (unlikely(queue->vif->disabled)) {
83 84 85 86
		napi_complete(napi);
		return 0;
	}

87
	work_done = xenvif_tx_action(queue, budget);
88 89

	if (work_done < budget) {
90
		napi_complete(napi);
91
		xenvif_napi_schedule_or_enable_events(queue);
92 93 94 95 96
	}

	return work_done;
}

97 98
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
99
	struct xenvif_queue *queue = dev_id;
100 101
	struct netdev_queue *net_queue =
		netdev_get_tx_queue(queue->vif->dev, queue->id);
102

103 104 105 106 107 108 109
	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
	 * the carrier went down and this queue was previously blocked
	 */
	if (unlikely(netif_tx_queue_stopped(net_queue) ||
		     (!netif_carrier_ok(queue->vif->dev) &&
		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
110
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
111 112 113 114

	return IRQ_HANDLED;
}

115
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
116 117 118 119 120 121 122
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

123 124 125 126 127 128 129 130 131 132 133 134 135 136
/* Is the netdev TX queue backing @queue currently stopped? */
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue->id));
}

/* Restart the netdev TX queue backing @queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

137 138
/* Callback to wake the queue's thread and turn the carrier off on timeout */
static void xenvif_rx_stalled(unsigned long data)
139
{
140 141 142
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	if (xenvif_queue_stopped(queue)) {
143
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
144 145 146
		xenvif_kick_thread(queue);
	}
}
147

I
Ian Campbell 已提交
148 149 150
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
151
	struct xenvif_queue *queue = NULL;
152
	unsigned int num_queues = vif->num_queues;
153
	u16 index;
154
	int min_slots_needed;
I
Ian Campbell 已提交
155 156 157

	BUG_ON(skb->dev != dev);

158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
174
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
175 176
		goto drop;

177 178 179 180
	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
I
Ian Campbell 已提交
181

182 183 184
	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
185
	if (skb_is_gso(skb))
186
		min_slots_needed++;
I
Ian Campbell 已提交
187

188 189 190 191
	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
192
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
193 194
		queue->rx_stalled.function = xenvif_rx_stalled;
		queue->rx_stalled.data = (unsigned long)queue;
195
		xenvif_stop_queue(queue);
196 197
		mod_timer(&queue->rx_stalled,
			  jiffies + rx_drain_timeout_jiffies);
198
	}
I
Ian Campbell 已提交
199

200 201
	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
202 203 204 205 206 207 208 209 210 211 212 213

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
214
	struct xenvif_queue *queue = NULL;
215
	unsigned int num_queues = vif->num_queues;
216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

I
Ian Campbell 已提交
240 241 242 243 244
	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
245
	struct xenvif_queue *queue = NULL;
246
	unsigned int num_queues = vif->num_queues;
247 248 249 250 251 252 253 254 255 256
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
I
Ian Campbell 已提交
257 258 259 260
}

static void xenvif_down(struct xenvif *vif)
{
261
	struct xenvif_queue *queue = NULL;
262
	unsigned int num_queues = vif->num_queues;
263 264 265 266 267 268 269 270 271 272
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
I
Ian Campbell 已提交
273 274 275 276 277
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
278
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
279
		xenvif_up(vif);
280
	netif_tx_start_all_queues(dev);
I
Ian Campbell 已提交
281 282 283 284 285 286
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
287
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
I
Ian Campbell 已提交
288
		xenvif_down(vif);
289
	netif_tx_stop_all_queues(dev);
I
Ian Campbell 已提交
290 291 292 293 294 295 296 297 298 299 300 301 302 303
	return 0;
}

/* ndo_change_mtu: cap the MTU at what the frontend can receive —
 * 64KiB frames when scatter-gather was negotiated, otherwise plain
 * Ethernet.  Returns -EINVAL for anything larger.
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max_mtu = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max_mtu)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

304 305
/* ndo_fix_features: strip any offload feature the frontend did not
 * negotiate (SG, TSO/TSO6, IPv4/IPv6 checksum offload).
 */
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);
	netdev_features_t gso = vif->gso_mask | vif->gso_prefix_mask;

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (!(gso & GSO_BIT(TCPV4)))
		features &= ~NETIF_F_TSO;
	if (!(gso & GSO_BIT(TCPV6)))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
329
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
I
Ian Campbell 已提交
330
	},
331 332 333 334 335
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
336
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
337 338 339
	},
	{
		"tx_zerocopy_success",
340
		offsetof(struct xenvif_stats, tx_zerocopy_success),
341 342 343
	},
	{
		"tx_zerocopy_fail",
344
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
345
	},
346 347 348 349 350
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
351
		offsetof(struct xenvif_stats, tx_frag_overflow)
352
	},
I
Ian Campbell 已提交
353 354 355 356 357 358 359 360 361 362 363 364 365 366 367
};

/* ethtool get_sset_count: only the stats string set is supported. */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* ethtool get_ethtool_stats: fill @data with one value per entry of
 * xenvif_stats[], each summed over every queue of the vif.
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;
	struct xenvif_stats *vif_stats;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			vif_stats = &vif->queues[queue_index].stats;
			/* .offset is a BYTE offset (offsetof), so the
			 * addition must be done on a char pointer.  The
			 * previous struct-pointer arithmetic scaled the
			 * offset by sizeof(struct xenvif_stats) and read
			 * well past the intended field.
			 */
			accum += *(unsigned long *)((char *)vif_stats +
						    xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* ethtool get_strings: copy out the stat names for ETH_SS_STATS. */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xenvif_stats[i].name, ETH_GSTRING_LEN);
}

397
static const struct ethtool_ops xenvif_ethtool_ops = {
I
Ian Campbell 已提交
398 399 400 401 402 403 404
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

405
static const struct net_device_ops xenvif_netdev_ops = {
I
Ian Campbell 已提交
406 407 408 409 410
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
411
	.ndo_fix_features = xenvif_fix_features,
412 413
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
I
Ian Campbell 已提交
414 415 416 417 418 419 420 421 422 423 424
};

/* Allocate and register the net_device for a new vif.
 *
 * @parent: device the netdev is parented to (the xenbus device)
 * @domid:  frontend domain id
 * @handle: per-domain vif handle (together they form the "vifD.H" name)
 *
 * The netdev is created with the maximum supported number of TX queues;
 * the real count is set later, after negotiation with the frontend,
 * via netif_set_real_num_*_queues().  Returns the vif or an ERR_PTR.
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	/* Balanced by the module_put() in xenvif_free(). */
	__module_get(THIS_MODULE);

	return vif;
}

485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524
/* One-time initialisation of a queue: credit limiter, skb lists,
 * pending-request ring, zerocopy callbacks, ballooned pages for grant
 * mappings, stall timer and NAPI instance.
 * Reversed by xenvif_deinit_queue().  Returns 0 or -ENOMEM.
 */
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	/* ~0UL credit means rate limiting is effectively off. */
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	/* Function/data are filled in by xenvif_start_xmit() when the
	 * timer is armed.
	 */
	init_timer(&queue->rx_stalled);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
539
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
540 541 542 543 544 545 546
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
547 548
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
I
Ian Campbell 已提交
549
{
550
	struct task_struct *task;
I
Ian Campbell 已提交
551 552
	int err = -ENOMEM;

553 554 555
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);
I
Ian Campbell 已提交
556

557
	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
I
Ian Campbell 已提交
558 559 560
	if (err < 0)
		goto err;

561 562
	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
563

564 565 566
	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
567 568
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
569 570
		if (err < 0)
			goto err_unmap;
571 572
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
573 574
	} else {
		/* feature-split-event-channels == 1 */
575 576
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
577
		err = bind_interdomain_evtchn_to_irqhandler(
578 579
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
580 581
		if (err < 0)
			goto err_unmap;
582 583
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);
584

585 586
		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
587
		err = bind_interdomain_evtchn_to_irqhandler(
588 589
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
590 591
		if (err < 0)
			goto err_tx_unbind;
592 593
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
594
	}
I
Ian Campbell 已提交
595

596
	task = kthread_create(xenvif_kthread_guest_rx,
597
			      (void *)queue, "%s-guest-rx", queue->name);
598
	if (IS_ERR(task)) {
599
		pr_warn("Could not allocate kthread for %s\n", queue->name);
600
		err = PTR_ERR(task);
601 602
		goto err_rx_unbind;
	}
603
	queue->task = task;
604

605
	task = kthread_create(xenvif_dealloc_kthread,
606
			      (void *)queue, "%s-dealloc", queue->name);
607
	if (IS_ERR(task)) {
608
		pr_warn("Could not allocate kthread for %s\n", queue->name);
609 610 611
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
612
	queue->dealloc_task = task;
613

614 615
	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);
616

I
Ian Campbell 已提交
617
	return 0;
618 619

err_rx_unbind:
620 621
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
622
err_tx_unbind:
623 624
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
I
Ian Campbell 已提交
625
err_unmap:
626
	xenvif_unmap_frontend_rings(queue);
I
Ian Campbell 已提交
627
err:
628
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
629 630 631
	return err;
}

632
void xenvif_carrier_off(struct xenvif *vif)
I
Ian Campbell 已提交
633 634
{
	struct net_device *dev = vif->dev;
635 636

	rtnl_lock();
637 638 639 640 641
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
642 643 644
	rtnl_unlock();
}

645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663
/* Block until every grant mapping of @queue has been released.  While
 * any slot still holds a valid handle, sleep one second and restart
 * the scan from the beginning; complain (rate-limited) once the wait
 * exceeds @worst_case_skb_lifetime seconds.
 */
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
				      unsigned int worst_case_skb_lifetime)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (queue->grant_tx_handle[i] == NETBACK_INVALID_HANDLE)
			continue;

		unmap_timeout++;
		schedule_timeout(msecs_to_jiffies(1000));
		if (unmap_timeout > worst_case_skb_lifetime &&
		    net_ratelimit())
			netdev_err(queue->vif->dev,
				   "Page still granted! Index: %x\n",
				   i);
		/* Deliberate: rescan all slots from the start. */
		i = -1;
	}
}

664 665
void xenvif_disconnect(struct xenvif *vif)
{
666
	struct xenvif_queue *queue = NULL;
667
	unsigned int num_queues = vif->num_queues;
668 669
	unsigned int queue_index;

670
	xenvif_carrier_off(vif);
I
Ian Campbell 已提交
671

672 673
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
674

675
		if (queue->task) {
676
			del_timer_sync(&queue->rx_stalled);
677 678 679
			kthread_stop(queue->task);
			queue->task = NULL;
		}
680

681 682 683 684 685 686 687 688 689 690 691 692 693
		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
694
		}
I
Ian Campbell 已提交
695

696 697
		xenvif_unmap_frontend_rings(queue);
	}
698 699
}

700 701 702 703 704 705 706 707 708 709
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
	netif_napi_del(&queue->napi);
}

710 711
void xenvif_free(struct xenvif *vif)
{
712
	struct xenvif_queue *queue = NULL;
713
	unsigned int num_queues = vif->num_queues;
714
	unsigned int queue_index;
715 716
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be an another vif's
717
	 * internal or QDisc queue. That another vif also has this
718 719 720 721
	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
	 * Although if that other guest wakes up just before its timeout happens
	 * and takes only one skb from QDisc, it can hold onto other skbs for a
	 * longer period.
722
	 */
723
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
724

725
	unregister_netdev(vif->dev);
726

727 728 729
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
730
		xenvif_deinit_queue(queue);
731 732 733 734
	}

	vfree(vif->queues);
	vif->queues = NULL;
735
	vif->num_queues = 0;
I
Ian Campbell 已提交
736 737

	free_netdev(vif->dev);
738

739
	module_put(THIS_MODULE);
I
Ian Campbell 已提交
740
}