macvtap.c 28.7 KB
Newer Older
A
Arnd Bergmann 已提交
1 2
#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
3
#include <linux/if_vlan.h>
A
Arnd Bergmann 已提交
4 5 6 7 8 9 10 11 12
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
13
#include <linux/slab.h>
A
Arnd Bergmann 已提交
14 15 16
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
A
Al Viro 已提交
17
#include <linux/idr.h>
A
Arnd Bergmann 已提交
18 19 20 21 22
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
23
#include <linux/virtio_net.h>
A
Arnd Bergmann 已提交
24 25 26 27 28 29 30 31 32 33 34 35 36 37

/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
38
	struct socket_wq wq;
39
	int vnet_hdr_sz;
40
	struct macvlan_dev __rcu *vlan;
A
Arnd Bergmann 已提交
41
	struct file *file;
42
	unsigned int flags;
J
Jason Wang 已提交
43
	u16 queue_index;
J
Jason Wang 已提交
44 45
	bool enabled;
	struct list_head next;
A
Arnd Bergmann 已提交
46 47 48 49 50 51 52 53 54
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
55
 * Variables for dealing with macvtaps device numbers.
A
Arnd Bergmann 已提交
56
 */
57
static dev_t macvtap_major;
58 59 60 61
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

62
#define GOODCOPY_LEN 128
A
Arnd Bergmann 已提交
63 64 65
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

A
Arnd Bergmann 已提交
66 67
static const struct proto_ops macvtap_socket_ops;

A
Arnd Bergmann 已提交
68 69
/*
 * RCU usage:
70 71
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
72
 * or rtnl is held.
A
Arnd Bergmann 已提交
73
 *
74 75 76 77
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the files gets closed,
 * macvtap_get_queue() fails.
A
Arnd Bergmann 已提交
78
 *
79 80 81 82
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
A
Arnd Bergmann 已提交
83 84
 */

J
Jason Wang 已提交
85
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
A
Arnd Bergmann 已提交
86
				struct macvtap_queue *q)
J
Jason Wang 已提交
87 88 89 90
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

91
	ASSERT_RTNL();
J
Jason Wang 已提交
92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
A
Arnd Bergmann 已提交
108 109 110 111
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EBUSY;

112
	rtnl_lock();
J
Jason Wang 已提交
113
	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
A
Arnd Bergmann 已提交
114 115 116
		goto out;

	err = 0;
117
	rcu_assign_pointer(q->vlan, vlan);
J
Jason Wang 已提交
118
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
119
	sock_hold(&q->sk);
A
Arnd Bergmann 已提交
120 121

	q->file = file;
J
Jason Wang 已提交
122
	q->queue_index = vlan->numvtaps;
J
Jason Wang 已提交
123
	q->enabled = true;
124
	file->private_data = q;
J
Jason Wang 已提交
125
	list_add_tail(&q->next, &vlan->queue_list);
A
Arnd Bergmann 已提交
126

127
	vlan->numvtaps++;
J
Jason Wang 已提交
128
	vlan->numqueues++;
129

A
Arnd Bergmann 已提交
130
out:
131
	rtnl_unlock();
A
Arnd Bergmann 已提交
132 133 134
	return err;
}

135
static int macvtap_disable_queue(struct macvtap_queue *q)
J
Jason Wang 已提交
136 137 138 139
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

140
	ASSERT_RTNL();
J
Jason Wang 已提交
141 142 143
	if (!q->enabled)
		return -EINVAL;

144 145
	vlan = rtnl_dereference(q->vlan);

J
Jason Wang 已提交
146 147 148
	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
149
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
J
Jason Wang 已提交
150 151 152 153 154 155 156 157 158 159 160 161
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

A
Arnd Bergmann 已提交
162
/*
163 164 165
 * The file owning the queue got closed, give up both
 * the reference that the files holds as well as the
 * one from the macvlan_dev if that still exists.
A
Arnd Bergmann 已提交
166 167 168 169
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
170
static void macvtap_put_queue(struct macvtap_queue *q)
A
Arnd Bergmann 已提交
171
{
172
	struct macvlan_dev *vlan;
A
Arnd Bergmann 已提交
173

174 175 176
	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

177
	if (vlan) {
J
Jason Wang 已提交
178
		if (q->enabled)
179
			BUG_ON(macvtap_disable_queue(q));
J
Jason Wang 已提交
180

J
Jason Wang 已提交
181
		vlan->numqueues--;
182
		RCU_INIT_POINTER(q->vlan, NULL);
183
		sock_put(&q->sk);
J
Jason Wang 已提交
184
		list_del_init(&q->next);
A
Arnd Bergmann 已提交
185 186
	}

187
	rtnl_unlock();
A
Arnd Bergmann 已提交
188 189 190 191 192 193

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
194 195 196 197 198
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
A
Arnd Bergmann 已提交
199 200 201 202 203
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
204
	struct macvtap_queue *tap = NULL;
J
Jason Wang 已提交
205 206 207 208 209
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
210
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
211 212 213 214 215
	__u32 rxq;

	if (!numvtaps)
		goto out;

216 217 218 219
	/* Check if we can use flow to select a queue */
	rxq = skb_get_rxhash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
J
Jason Wang 已提交
220
		goto out;
221 222
	}

223 224
	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);
A
Arnd Bergmann 已提交
225

226 227 228 229
		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
J
Jason Wang 已提交
230
		goto out;
231 232
	}

J
Jason Wang 已提交
233
	tap = rcu_dereference(vlan->taps[0]);
234 235
out:
	return tap;
A
Arnd Bergmann 已提交
236 237
}

238 239
/*
 * The net_device is going away, give up the reference
240 241
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
242
 */
A
Arnd Bergmann 已提交
243 244 245
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
J
Jason Wang 已提交
246
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
247
	int i, j = 0;
248

249
	ASSERT_RTNL();
J
Jason Wang 已提交
250 251
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
J
Jason Wang 已提交
252 253
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
J
Jason Wang 已提交
254 255 256
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
257
	}
J
Jason Wang 已提交
258 259 260 261
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
262 263
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
264 265 266

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
A
Arnd Bergmann 已提交
267 268 269 270 271 272 273 274 275 276 277
}

/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
	if (!q)
H
Herbert Xu 已提交
278 279 280 281
		goto drop;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;
A
Arnd Bergmann 已提交
282 283

	skb_queue_tail(&q->sk.sk_receive_queue, skb);
E
Eric Dumazet 已提交
284
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
H
Herbert Xu 已提交
285 286 287 288 289
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
A
Arnd Bergmann 已提交
290 291 292 293 294 295 296 297 298 299 300 301 302
}

/*
 * Receive is for data from the external interface (lowerdev),
 * in case of macvtap, we can treat that the same way as
 * forward, which macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	return macvtap_forward(skb->dev, skb);
}

303 304 305 306 307
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
T
Tejun Heo 已提交
308 309 310 311
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
312 313 314 315
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
T
Tejun Heo 已提交
316
	return retval < 0 ? retval : 0;
317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

A
Arnd Bergmann 已提交
344 345 346 347 348
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
J
Jason Wang 已提交
349 350 351
	struct macvlan_dev *vlan = netdev_priv(dev);
	INIT_LIST_HEAD(&vlan->queue_list);

352 353 354 355 356
	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data,
				      macvtap_receive, macvtap_forward);
A
Arnd Bergmann 已提交
357 358 359 360 361 362 363 364 365
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

H
Herbert Xu 已提交
366 367 368 369 370 371
static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

A
Arnd Bergmann 已提交
372 373
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
H
Herbert Xu 已提交
374
	.setup		= macvtap_setup,
A
Arnd Bergmann 已提交
375 376 377 378 379 380 381
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};


static void macvtap_sock_write_space(struct sock *sk)
{
382 383
	wait_queue_head_t *wqueue;

A
Arnd Bergmann 已提交
384 385 386 387
	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

388 389 390
	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
A
Arnd Bergmann 已提交
391 392
}

393 394 395 396 397
static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

A
Arnd Bergmann 已提交
398 399 400
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
401
	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
A
Arnd Bergmann 已提交
402 403 404 405 406 407 408 409 410 411 412 413 414
	struct macvtap_queue *q;
	int err;

	err = -ENODEV;
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

J
Jason Wang 已提交
415
	RCU_INIT_POINTER(q->sock.wq, &q->wq);
416
	init_waitqueue_head(&q->wq.wait);
A
Arnd Bergmann 已提交
417 418
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
A
Arnd Bergmann 已提交
419 420
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
A
Arnd Bergmann 已提交
421 422
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
423
	q->sk.sk_destruct = macvtap_sock_destruct;
424
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
425
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
A
Arnd Bergmann 已提交
426

427 428 429
	/*
	 * so far only KVM virtio_net uses macvtap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
430 431 432
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
433
	 */
434 435
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);
436

A
Arnd Bergmann 已提交
437 438 439 440 441 442 443 444 445 446 447 448 449
	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
450 451
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
A
Arnd Bergmann 已提交
452 453 454 455 456
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
457
	struct macvtap_queue *q = file->private_data;
A
Arnd Bergmann 已提交
458 459 460 461 462 463
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
464
	poll_wait(file, &q->wq.wait, wait);
A
Arnd Bergmann 已提交
465 466 467 468 469 470 471 472 473 474 475 476 477

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500
/*
 * Allocate an skb with @linear bytes of headroom data and the rest of
 * @len as paged data, charged to @sk's send buffer.
 */
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Small packet (or no linear hint)?  Make it fully linear. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525
/*
 * set skb frags from iovec, this can move to core network code for reuse
 *
 * Copies up to skb_headlen() bytes into the linear area, then pins the
 * remaining user pages with get_user_pages_fast() and attaches them as
 * skb frags.  Returns 0, -EFAULT or -EMSGSIZE.
 */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
				  int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			int j;

			/*
			 * This call stored its pages at &page[i]; release
			 * exactly those.  (Previously the loop put
			 * page[0..num_pages), which for i > 0 dropped
			 * references on the wrong pages and leaked the
			 * ones just pinned.)
			 */
			for (j = 0; j < num_pages; j++)
				put_page(page[i + j]);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		/* charge the pinned memory to the socket */
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}

579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
617
		skb_shinfo(skb)->gso_type = gso_type;
618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				   struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
652
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
653
		vnet_hdr->csum_offset = skb->csum_offset;
654 655
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
656 657 658 659 660 661
	} /* else everything is zero */

	return 0;
}


A
Arnd Bergmann 已提交
662
/* Get packet from user space buffer */
663 664 665
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
A
Arnd Bergmann 已提交
666 667
{
	struct sk_buff *skb;
668
	struct macvlan_dev *vlan;
669
	unsigned long len = total_len;
A
Arnd Bergmann 已提交
670
	int err;
671 672
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
673
	int copylen = 0;
674
	bool zerocopy = false;
675 676

	if (q->flags & IFF_VNET_HDR) {
677
		vnet_hdr_len = q->vnet_hdr_sz;
678 679

		err = -EINVAL;
680
		if (len < vnet_hdr_len)
681
			goto err;
682
		len -= vnet_hdr_len;
683 684

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
685
					   sizeof(vnet_hdr));
686 687 688 689 690 691 692 693 694 695 696
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}
A
Arnd Bergmann 已提交
697

698
	err = -EINVAL;
A
Arnd Bergmann 已提交
699
	if (unlikely(len < ETH_HLEN))
700
		goto err;
A
Arnd Bergmann 已提交
701

702 703 704 705
	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

706 707 708 709
	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
		zerocopy = true;

	if (zerocopy) {
710 711 712 713 714 715 716 717 718 719 720
		/* Userspace may produce vectors with count greater than
		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
		 * to let the rest of data to be fit in the frags.
		 */
		if (count > MAX_SKB_FRAGS) {
			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
			if (copylen < vnet_hdr_len)
				copylen = 0;
			else
				copylen -= vnet_hdr_len;
		}
721 722 723 724
		/* There are 256 bytes to be copied in skb, so there is enough
		 * room for skb expand head in case it is used.
		 * The rest buffer is mapped from userspace.
		 */
725 726
		if (copylen < vnet_hdr.hdr_len)
			copylen = vnet_hdr.hdr_len;
727 728 729 730 731 732 733
		if (!copylen)
			copylen = GOODCOPY_LEN;
	} else
		copylen = len;

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				vnet_hdr.hdr_len, noblock, &err);
734 735
	if (!skb)
		goto err;
A
Arnd Bergmann 已提交
736

737
	if (zerocopy)
738
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
739
	else
740 741
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
742
	if (err)
743
		goto err_kfree;
A
Arnd Bergmann 已提交
744 745

	skb_set_network_header(skb, ETH_HLEN);
746 747 748 749 750 751 752 753 754
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

755
	skb_probe_transport_header(skb, ETH_HLEN);
756

757 758
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
759
	/* copy skb_ubuf_info for callback when skb has no error */
760
	if (zerocopy) {
761
		skb_shinfo(skb)->destructor_arg = m->msg_control;
762
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
763
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
764
	}
765 766 767 768
	if (vlan)
		macvlan_start_xmit(skb, vlan->dev);
	else
		kfree_skb(skb);
769
	rcu_read_unlock();
A
Arnd Bergmann 已提交
770

771
	return total_len;
772

773 774 775
err_kfree:
	kfree_skb(skb);

776
err:
777 778
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
779
	if (vlan)
E
Eric Dumazet 已提交
780
		vlan->dev->stats.tx_dropped++;
781
	rcu_read_unlock();
782 783

	return err;
A
Arnd Bergmann 已提交
784 785 786 787 788 789 790
}

/* write() entry point: hand the iovec straight to macvtap_get_user(). */
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
803
	struct macvlan_dev *vlan;
A
Arnd Bergmann 已提交
804
	int ret;
805
	int vnet_hdr_len = 0;
806 807
	int vlan_offset = 0;
	int copied;
808 809 810

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
811
		vnet_hdr_len = q->vnet_hdr_sz;
812 813 814 815 816 817 818
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
		if (ret)
			return ret;

819
		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
820 821
			return -EFAULT;
	}
822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
	copied = vnet_hdr_len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);
	else {
		int copy;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = htons(ETH_P_8021Q);
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;
	}
A
Arnd Bergmann 已提交
852

853 854
	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
	copied += len;
A
Arnd Bergmann 已提交
855

856
done:
857 858
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
A
Arnd Bergmann 已提交
859
	if (vlan)
860
		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
861
	rcu_read_unlock();
A
Arnd Bergmann 已提交
862

863
	return ret ? ret : copied;
A
Arnd Bergmann 已提交
864 865
}

A
Arnd Bergmann 已提交
866 867 868
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
A
Arnd Bergmann 已提交
869
{
870
	DEFINE_WAIT(wait);
A
Arnd Bergmann 已提交
871
	struct sk_buff *skb;
A
Arnd Bergmann 已提交
872
	ssize_t ret = 0;
A
Arnd Bergmann 已提交
873 874

	while (len) {
875 876 877
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);
A
Arnd Bergmann 已提交
878 879 880 881

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
A
Arnd Bergmann 已提交
882
			if (noblock) {
A
Arnd Bergmann 已提交
883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
		break;
	}

899 900
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
A
Arnd Bergmann 已提交
901 902 903 904 905 906 907 908 909
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;
A
Arnd Bergmann 已提交
910

A
Arnd Bergmann 已提交
911 912 913 914 915 916 917 918
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
A
Arnd Bergmann 已提交
919 920 921 922
out:
	return ret;
}

923 924 925 926
/*
 * Resolve the queue's macvlan device under rtnl, taking a device
 * reference the caller must drop with macvtap_put_vlan().
 */
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

J
Jason Wang 已提交
940 941 942 943 944 945 946 947 948 949 950 951 952 953
/* TUNSETQUEUE helper: attach or detach this queue.  Caller holds rtnl. */
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

A
Arnd Bergmann 已提交
961 962 963 964 965 966
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
967 968
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
A
Arnd Bergmann 已提交
969 970 971 972
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
973 974
	int __user *sp = argp;
	int s;
975
	int ret;
A
Arnd Bergmann 已提交
976 977 978 979 980 981

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
982 983

		ret = 0;
J
Jason Wang 已提交
984 985
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
986 987 988 989 990
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;
A
Arnd Bergmann 已提交
991 992

	case TUNGETIFF:
993
		rtnl_lock();
994
		vlan = macvtap_get_vlan(q);
995 996
		if (!vlan) {
			rtnl_unlock();
A
Arnd Bergmann 已提交
997
			return -ENOLINK;
998
		}
A
Arnd Bergmann 已提交
999

1000
		ret = 0;
1001
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
1002
		    put_user(q->flags, &ifr->ifr_flags))
1003
			ret = -EFAULT;
1004
		macvtap_put_vlan(vlan);
1005
		rtnl_unlock();
1006
		return ret;
A
Arnd Bergmann 已提交
1007

J
Jason Wang 已提交
1008 1009 1010
	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
1011 1012 1013
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
J
Jason Wang 已提交
1014

A
Arnd Bergmann 已提交
1015
	case TUNGETFEATURES:
J
Jason Wang 已提交
1016 1017
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
A
Arnd Bergmann 已提交
1018 1019 1020 1021 1022 1023 1024 1025 1026 1027
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

A
Arnd Bergmann 已提交
1043 1044 1045
	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1046
			    TUN_F_TSO_ECN | TUN_F_UFO))
A
Arnd Bergmann 已提交
1047 1048
			return -EINVAL;

1049 1050 1051 1052
		/* TODO: only accept frames with the features that
			 got enabled for forwarded frames */
		if (!(q->flags & IFF_VNET_HDR))
			return  -EINVAL;
A
Arnd Bergmann 已提交
1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
		return 0;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * File operations backing the /dev/tapNN character device.  Each open
 * file is bound to one macvtap_queue (see struct macvtap_queue above).
 */
static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

A
Arnd Bergmann 已提交
1082 1083 1084 1085
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
1086
	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
A
Arnd Bergmann 已提交
1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128
			    m->msg_flags & MSG_DONTWAIT);
}

/* recvmsg() for the socket interface: read one frame into the caller's
 * iovec.  Sets MSG_TRUNC in msg_flags when the frame did not fit; with
 * MSG_TRUNC in @flags the full frame length is returned instead of the
 * copied length. */
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;

	ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	/*
	 * Check ret > 0 first: the bare "ret > total_len" comparison
	 * promoted the int to size_t, so a negative error code (kernel
	 * convention for *_do_read helpers) became a huge unsigned value
	 * and was silently rewritten to a fake "total_len bytes received".
	 */
	if (ret > 0 && (size_t)ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Minimal proto_ops mimicking a raw/tun socket, so the queue's embedded
 * socket can be driven with sock_sendmsg()/sock_recvmsg() by in-kernel
 * users that obtained it via macvtap_get_socket(). */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;

	/* Only macvtap character-device files carry a queue. */
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);

	q = file->private_data;
	return q ? &q->sock : ERR_PTR(-EBADFD);
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

1129 1130 1131
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
1132
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1133
	struct macvlan_dev *vlan;
1134 1135
	struct device *classdev;
	dev_t devt;
1136
	int err;
1137 1138 1139 1140

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

1141
	vlan = netdev_priv(dev);
1142 1143 1144 1145 1146 1147 1148

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
1149 1150 1151 1152 1153
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
1154 1155
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
1156 1157
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
1158
			return notifier_from_errno(PTR_ERR(classdev));
1159
		}
1160 1161
		break;
	case NETDEV_UNREGISTER:
1162
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
1163
		device_destroy(macvtap_class, devt);
1164
		macvtap_free_minor(vlan);
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174
		break;
	}

	return NOTIFY_DONE;
}

/* Tracks macvtap netdev registration to manage the tapNN device nodes. */
static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};

A
Arnd Bergmann 已提交
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194
static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

1195
	err = register_netdevice_notifier(&macvtap_notifier_block);
A
Arnd Bergmann 已提交
1196 1197 1198
	if (err)
		goto out4;

1199 1200 1201 1202
	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

A
Arnd Bergmann 已提交
1203 1204
	return 0;

1205 1206
out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
A
Arnd Bergmann 已提交
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
1221
	unregister_netdevice_notifier(&macvtap_notifier_block);
A
Arnd Bergmann 已提交
1222 1223 1224 1225 1226 1227 1228 1229 1230
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");