interface.c 19.7 KB
Newer Older
I
Ian Campbell 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

33
#include <linux/kthread.h>
I
Ian Campbell 已提交
34 35 36
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
37
#include <linux/vmalloc.h>
I
Ian Campbell 已提交
38 39 40

#include <xen/events.h>
#include <asm/xen/hypercall.h>
41
#include <xen/balloon.h>
I
Ian Campbell 已提交
42 43

#define XENVIF_QUEUE_LENGTH 32
44
#define XENVIF_NAPI_WEIGHT  64
I
Ian Campbell 已提交
45

46 47 48
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

/* Counterpart of xenvif_skb_zerocopy_prepare(): drops the inflight
 * count and wakes the dealloc thread.
 */
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

I
Ian Campbell 已提交
72 73
int xenvif_schedulable(struct xenvif *vif)
{
74
	return netif_running(vif->dev) &&
75 76
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
I
Ian Campbell 已提交
77 78
}

79
/* TX event channel handler: the frontend has produced transmit
 * requests; schedule NAPI to process them (xenvif_poll).
 */
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	/* Only bother NAPI if there is actually work on the ring. */
	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

L
Lad, Prabhakar 已提交
89
/* NAPI poll handler: processes up to @budget transmit requests from
 * the frontend. Returning less than @budget completes NAPI polling.
 */
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; pretend there is nothing to do for it so
	 * that it is descheduled from NAPI. The interface itself will
	 * be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* Re-arm events; may re-schedule NAPI if work raced in. */
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

114 115
/* RX event channel handler: kick the per-queue guest-rx kthread,
 * which performs the actual receive-side processing.
 */
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

123
/* Combined handler used when the frontend shares a single event
 * channel for both TX and RX (feature-split-event-channels == 0).
 */
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

131 132 133 134 135 136 137 138 139
/* Control ring event handler: wake the control kthread, which
 * consumes requests from vif->ctrl.
 */
irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	wake_up(&vif->ctrl_wq);

	return IRQ_HANDLED;
}

140 141 142 143 144 145 146 147 148 149 150 151 152 153
/* Return non-zero if the netdev TX queue backing @queue is stopped. */
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
	return netif_tx_queue_stopped(txq);
}

/* Restart the netdev TX queue backing @queue. */
void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
	netif_tx_wake_queue(txq);
}

I
Ian Campbell 已提交
154 155 156
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
157
	struct xenvif_queue *queue = NULL;
158
	unsigned int num_queues = vif->num_queues;
159
	u16 index;
160
	struct xenvif_rx_cb *cb;
I
Ian Campbell 已提交
161 162 163

	BUG_ON(skb->dev != dev);

164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
180
	    !xenvif_schedulable(vif))
I
Ian Campbell 已提交
181 182
		goto drop;

183 184 185 186 187 188 189
	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

190
	cb = XENVIF_RX_CB(skb);
191
	cb->expires = jiffies + vif->drain_timeout;
I
Ian Campbell 已提交
192

193
	xenvif_rx_queue_tail(queue, skb);
194
	xenvif_kick_thread(queue);
I
Ian Campbell 已提交
195 196 197 198 199 200 201 202 203 204 205 206

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* ndo_get_stats hook: aggregate per-queue rx/tx byte and packet
 * counters into the netdev's stats structure and return it.
 */
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	/* Queues may not have been allocated yet; report zeros. */
	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	/* NOTE(review): per-queue counters are read without locking;
	 * presumably tolerable for statistics — confirm.
	 */
	return &vif->dev->stats;
}

/* Enable NAPI and the tx/rx interrupts on every queue. Called from
 * xenvif_open() and xenvif_carrier_on() when the device is both
 * running and connected.
 */
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		/* tx_irq == rx_irq when a single event channel is shared. */
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

/* Reverse of xenvif_up(): quiesce every queue by disabling its irqs,
 * stopping NAPI and cancelling any pending credit timer.
 */
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		/* tx_irq == rx_irq when a single event channel is shared. */
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

/* ndo_open hook: bring the interface up if it is already connected
 * to the frontend, then start all netdev TX queues.
 */
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

/* ndo_stop hook: quiesce the interface if connected, then stop all
 * netdev TX queues.
 */
static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* ndo_change_mtu hook: reject an MTU beyond what the vif supports.
 * Without scatter-gather the limit is the standard Ethernet MTU;
 * with it, a near-64K frame (minus VLAN-tagged Ethernet header).
 */
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max_mtu = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max_mtu)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}

297 298
/* ndo_fix_features hook: mask out offload features this vif does not
 * support, based on the flags recorded on the vif.
 */
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	/* TSO/TSO6 require the corresponding GSO bit in either mask. */
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

/* Ethtool statistics table: each entry names a counter and records its
 * byte offset within struct xenvif_stats (summed across queues in
 * xenvif_get_ethtool_stats()).
 */
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
	 * a guest with the same MAX_SKB_FRAG
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

/* Ethtool: report how many strings/stats we expose for @string_set. */
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xenvif_stats);

	return -EINVAL;
}

/* Ethtool: fill @data with one value per xenvif_stats[] entry, each
 * being the counter summed over all queues. Counters are located by
 * byte offset into each queue's struct xenvif_stats.
 */
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			/* void* arithmetic: offset indexes into the stats struct. */
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

/* Ethtool: copy the statistic names (one ETH_GSTRING_LEN slot each)
 * into @data, in the same order xenvif_get_ethtool_stats() fills them.
 */
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

389
/* Ethtool operations exposed by the vif netdev. */
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

397
/* Netdevice operations exposed by the vif netdev. */
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

/* Allocate and register the netdev for domain @domid / handle @handle.
 * The device starts with carrier off and no queues; queues are set up
 * later when the frontend connects. Returns the new vif or an
 * ERR_PTR() on failure. On success a module reference is taken,
 * released again in xenvif_free().
 */
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

482 483 484 485 486 487 488
/* Initialise one xenvif_queue: credit-based rate limiting state, the
 * internal skb queues, the pending-request ring, its locks and the
 * grant-mapped page pool. Returns 0 on success or -ENOMEM.
 * Teardown counterpart is xenvif_deinit_queue().
 */
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	/* ~0UL == effectively unlimited credit by default. */
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_timeout.function = xenvif_tx_credit_callback;
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	/* All pending-request slots start out free. */
	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

/* Mark the vif connected. Clamps the MTU when scatter-gather is not
 * available, refreshes netdev features, and enables the queues if the
 * device is already running. Runs under rtnl_lock.
 */
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598
/* Connect the control ring: map the frontend's shared page, bind the
 * control event channel and start the control kthread. Returns 0 on
 * success; on failure everything acquired so far is unwound.
 * Teardown counterpart is xenvif_disconnect_ctrl().
 */
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	struct task_struct *task;
	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	init_waitqueue_head(&vif->ctrl_wq);

	/* On success the returned value is the bound irq number. */
	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
						    xenvif_ctrl_interrupt,
						    0, dev->name, vif);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
			      "%s-control", dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", dev->name);
		err = PTR_ERR(task);
		goto err_deinit;
	}

	/* Hold a task ref so the struct outlives the thread's exit. */
	get_task_struct(task);
	vif->ctrl_task = task;

	wake_up_process(vif->ctrl_task);

	return 0;

err_deinit:
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

/* Connect one data queue: map the frontend's tx/rx shared rings, set
 * up NAPI, bind the event channel(s) (shared or split depending on
 * whether tx_evtchn == rx_evtchn) and start the guest-rx and dealloc
 * kthreads. Returns 0 on success or a negative errno, unwinding on
 * the way out. Teardown counterpart is xenvif_disconnect_data().
 */
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	/* A queue must only be connected once. */
	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		/* Kept disabled until xenvif_up(). */
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	/* Hold a task ref; dropped in xenvif_disconnect_data(). */
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		/* NOTE(review): queue->task was already created and a task
		 * ref taken; this path does not stop it or drop the ref —
		 * presumably the caller invokes xenvif_disconnect_data()
		 * on failure, which does. Confirm against the xenbus code.
		 */
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	/* NOTE(review): no matching module_get() is visible in this
	 * function — verify this module_put() balances a reference
	 * taken by the caller.
	 */
	module_put(THIS_MODULE);
	return err;
}

691
/* Mark the vif disconnected: clear VIF_STATUS_CONNECTED (if it was
 * set), drop carrier so queued packets are discarded, and quiesce the
 * queues if the device is running. Runs under rtnl_lock.
 */
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

704
/* Tear down everything xenvif_connect_data() set up, for every queue:
 * stop the kthreads, unbind the event channel irq(s), delete NAPI and
 * unmap the shared rings, then free the multicast filter list.
 */
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			/* Drops the ref taken in xenvif_connect_data(). */
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			/* Shared event channel: one irq covers tx and rx. */
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763
/* Tear down everything xenvif_connect_ctrl() set up: stop the control
 * kthread, unbind the control irq and unmap the control ring. Safe to
 * call on a partially-connected or already-disconnected vif (each
 * step is guarded).
 */
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_task) {
		kthread_stop(vif->ctrl_task);
		put_task_struct(vif->ctrl_task);
		vif->ctrl_task = NULL;
	}

	if (vif->ctrl_irq) {
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

764 765 766 767 768 769
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

773 774
/* Final teardown: unregister and free the netdev, release the queues,
 * and drop the module reference taken in xenvif_alloc().
 */
void xenvif_free(struct xenvif *vif)
{
	/* Save queue info locally: free_netdev() below frees the vif
	 * itself (it lives in the netdev's private area).
	 */
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}