/*
   Copyright (c) 2013-2014 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
	int status;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices that we are acting
 * as a proxy for. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to
 * the BT device is done via the l2cap layer. There exists one
 * virtual device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We are listening for incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

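	/* Drop the module reference taken in chan_ready_cb() when this
	 * peer was added.
	 */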
	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d",
		       &peer->chan->dst, peer->chan->dst_type);

		if (bacmp(&peer->chan->dst, ba))
			continue;

		if (type == peer->chan->dst_type) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
						     struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
						     struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct lowpan_peer *peer;
	struct in6_addr *nexthop;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	/* If we have multiple 6lowpan peers, then check where we should
	 * send the packet. If only one peer exists, then we can send the
	 * packet right away.
	 */
	if (count == 1) {
		rcu_read_lock();
		peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
					      list);
		rcu_read_unlock();
		return peer;
	}

	if (!rt) {
		nexthop = &lowpan_cb(skb)->gw;

		if (ipv6_addr_any(nexthop))
			return NULL;
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry;
	struct lowpan_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct l2cap_chan *chan)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;

	dev = lowpan_dev(netdev);

	rcu_read_lock();
	peer = __peer_lookup_chan(dev, chan);
	rcu_read_unlock();
	if (!peer)
		return -EINVAL;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		return -EINVAL;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		return -EINVAL;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		return -EINVAL;

	return lowpan_header_decompress(skb, netdev,
					saddr, IEEE802154_ADDR_LONG,
					EUI64_ADDR_LEN, daddr,
					IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
					iphc0, iphc1);

}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_chan *chan)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
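		/* An IPHC compressed 6LoWPAN packet: decompress it to a
		 * plain IPv6 packet before passing it up.
		 */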
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		ret = iphc_decompress(local_skb, dev, chan);
		if (ret < 0) {
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		if (give_skb_to_upper(local_skb, dev)
				!= NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, chan);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

static u8 get_addr_type_from_eui64(u8 byte)
{
	/* Check the universal(0) or local(1) bit of the EUI-64 address */
	return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
}

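/* Recover the little-endian BD address from a modified EUI-64
 * interface identifier, skipping the 0xFF/0xFE filler bytes that
 * set_addr() places in the middle (eui64[3] and eui64[4]).
 */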
static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}

static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from IPv6 address
	 * so that we get the proper address and type of the BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}

static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *daddr = any->b;
	int err, status = 0;

	dev = lowpan_dev(netdev);

	memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
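		/* Multicast: there is no single destination channel, so
		 * bt_xmit() fans the packet out to every peer instead.
		 */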
		lowpan_cb(skb)->chan = NULL;
	} else {
		u8 addr_type;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
		       addr_type, &ipv6_daddr);

		peer = peer_lookup_ba(dev, &addr, addr_type);
		if (!peer) {
			/* The packet might be sent to 6lowpan interface
			 * because of routing (either via default route
			 * or user set route) so get peer according to
			 * the destination address.
			 */
			peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
			if (!peer) {
				BT_DBG("no such peer %pMR found", &addr);
				return -ENOENT;
			}
		}

		daddr = peer->eui64_addr;
		*peer_addr = addr;
		*peer_addr_type = addr_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
			       dev->netdev->dev_addr, skb->len);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

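	/* Save the destination address in the skb control buffer so that
	 * setup_header() can use it at transmit time.
	 */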
	memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));

	return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (!err)
		err = lowpan_cb(skb)->status;

	if (err < 0) {
		if (err == -EAGAIN)
			netdev->stats.tx_dropped++;
		else
			netdev->stats.tx_errors++;
	}

	return err;
}

static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 *  <0 - error, packet is dropped
	 *   0 - this is a multicast packet
	 *   1 - this is unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

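/* Give the virtual device its own lockdep classes so that taking its
 * xmit locks while other network locks are held does not trigger
 * false positive lockdep reports.
 */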
static struct lock_class_key bt_tx_busylock;
static struct lock_class_key bt_netdev_xmit_lock_key;

static void bt_set_lockdep_class_one(struct net_device *dev,
				     struct netdev_queue *txq,
				     void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key);
}

static int bt_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &bt_tx_busylock;

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
				  IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
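	/* 0xFF/0xFE filler bytes used when a 48-bit address is expanded
	 * into a modified EUI-64 (RFC 4291 app. A)
	 */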
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == BDADDR_LE_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
		         u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_close(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
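	/* Default to the IPv6 minimum link MTU of 1280 octets (RFC 2460) */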
	chan->imtu = 1280;

	return chan;
}

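/* In a modified EUI-64 interface identifier the universal/local bit is
 * inverted: 1 means a universally administered (public) address.
 */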
static void set_ip_addr_bits(u8 addr_type, u8 *addr)
{
	if (addr_type == BDADDR_LE_PUBLIC)
		*addr |= 0x02;
	else
		*addr &= ~0x02;
}

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
		 chan->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	/* IPv6 address needs to have the U/L bit set properly so toggle
	 * it back here.
	 */
	set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
{
	struct net_device *netdev;
	int err = 0;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &chan->src, chan->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *dev;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev);
	ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *entry;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       atomic_read(&chan->kref.refcount));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}

	return;
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

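	/* We ran out of credits: flag the skb that is still pending so
	 * that send_pkt() returns -EAGAIN for it.
	 */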
	lowpan_cb(skb)->status = -EAGAIN;
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

	lowpan_cb(skb)->status = 0;
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static inline __u8 bdaddr_type(__u8 type)
{
	if (type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;
	else
		return BDADDR_LE_RANDOM;
}

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

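	/* A listening channel is a parent in the l2cap channel lock
	 * nesting hierarchy.
	 */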
	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %d", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	bdaddr_t *src = BDADDR_ANY;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	hdev = hci_get_route(addr, src);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			lowpan_enable_set, "%llu\n");

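/* Example use of the debugfs control interface (a sketch; it assumes
 * debugfs is mounted at /sys/kernel/debug and uses a placeholder peer
 * address and address type):
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
 *   echo "connect 00:1B:DC:00:00:01 1" > \
 *        /sys/kernel/debug/bluetooth/6lowpan_control
 *   echo "disconnect 00:1B:DC:00:00:01 1" > \
 *        /sys/kernel/debug/bluetooth/6lowpan_control
 */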
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						    bt_debugfs, NULL,
						    &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");