/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

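/* Map an HCI address type onto the bdaddr_t address type exposed to
 * user space: public/random LE addresses become BDADDR_LE_PUBLIC or
 * BDADDR_LE_RANDOM, while anything on a BR/EDR link is BDADDR_BREDR.
 */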
static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

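/* Like __l2cap_get_chan_by_ident(), but takes conn->chan_lock and
 * returns the matching channel locked.
 */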
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}

int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
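/* Channel state transitions are funnelled through the helpers below so
 * that the channel owner (e.g. the socket layer) observes every change
 * via chan->ops->state_change(), optionally together with an error.
 */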

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

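/* ERTM timer helpers: the retransmission timer is only armed while no
 * monitor timer is pending, and arming the monitor timer always clears
 * the retransmission timer first.
 */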
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
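/* For example, with a negotiated transmit window of 10 the backing
 * array is rounded up to 16 slots, so a 14-bit sequence number such as
 * 0x2005 lands in slot 0x2005 & 0xf = 5.  A slot holding
 * L2CAP_SEQ_LIST_CLEAR means "not on the list"; the slot of the last
 * element holds L2CAP_SEQ_LIST_TAIL.
 */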

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
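/* Attach a channel to a connection and assign its CIDs: ATT and A2MP
 * channels use their fixed CIDs, connectionless and raw channels use
 * the connectionless/signalling CIDs, and connection-oriented channels
 * get a dynamic source CID from l2cap_alloc_cid().  References are
 * taken on both the channel and the underlying hci_conn before the
 * channel is added to conn->chan_l.
 */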

void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
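/* Detach a channel from its connection: unlink it from conn->chan_l,
 * drop the references taken in __l2cap_chan_add(), disconnect any AMP
 * logical link and, unless configuration never completed, flush the
 * ERTM/streaming queues and timers.
 */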

void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
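/* Close a channel according to its current state: listening channels
 * are simply torn down, connected BR/EDR channels send a Disconnect
 * request, and channels still in BT_CONNECT2 answer the pending
 * Connect request before being deleted.
 */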

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
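/* Derive the HCI authentication requirement from the channel type, PSM
 * and requested security level; channels bound to the SDP PSM with a
 * low security level are treated as BT_SECURITY_SDP and request no
 * bonding.
 */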

static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
677
{
678 679
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
680
		switch (chan->sec_level) {
681 682 683 684 685 686 687
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
688
		break;
689 690 691 692 693 694 695 696 697 698
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
699 700 701 702
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
703

704 705 706 707 708 709 710
			if (chan->sec_level == BT_SECURITY_HIGH)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
711
		switch (chan->sec_level) {
712
		case BT_SECURITY_HIGH:
713
			return HCI_AT_GENERAL_BONDING_MITM;
714
		case BT_SECURITY_MEDIUM:
715
			return HCI_AT_GENERAL_BONDING;
716
		default:
717
			return HCI_AT_NO_BONDING;
718
		}
719
		break;
720
	}
721 722 723
}

/* Service level security */
724
int l2cap_chan_check_security(struct l2cap_chan *chan)
725
{
726
	struct l2cap_conn *conn = chan->conn;
727 728
	__u8 auth_type;

729 730 731
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

732
	auth_type = l2cap_get_auth_type(chan);
733

734
	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
735 736
}

737
static u8 l2cap_get_ident(struct l2cap_conn *conn)
738 739 740 741 742 743 744 745 746
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

747
	spin_lock(&conn->lock);
748 749 750 751 752 753

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

754
	spin_unlock(&conn->lock);
755 756 757 758

	return id;
}

759 760
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
761 762
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
763
	u8 flags;
764 765 766 767

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
768
		return;
769

770 771 772 773 774
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

775
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
776
	skb->priority = HCI_PRIO_MAX;
777

778 779 780
	hci_send_acl(conn->hchan, skb, flags);
}

781 782 783 784 785 786
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

787 788 789 790 791 792
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
793
	       skb->priority);
794

795 796 797 798 799 800 801 802 803
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

804
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
805
	    lmp_no_flush_capable(hcon->hdev))
806 807 808
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;
809

810 811
	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
812 813
}

814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
868
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
869 870 871
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
872
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
873 874 875
	}
}

876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

914 915 916 917 918 919 920 921 922 923 924 925 926
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

927 928 929 930 931 932 933 934
static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

935 936
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
937 938 939
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
940
	int hlen = __ertm_hdr_size(chan);
941 942 943 944

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

945
	skb = bt_skb_alloc(hlen, GFP_KERNEL);
946 947

	if (!skb)
948
		return ERR_PTR(-ENOMEM);
949 950 951 952 953

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

954 955 956 957
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
958 959

	if (chan->fcs == L2CAP_FCS_CRC16) {
960
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
961 962 963 964
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
965 966 967 968 969 970 971 972 973 974 975 976 977 978
	return skb;
}

static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

979 980 981
	if (__chan_is_moving(chan))
		return;

982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
1007 1008
}

1009
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1010
{
1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;
1023

1024 1025
	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
1026 1027
}

1028
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1029
{
1030
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1031 1032
}

1033
static bool __amp_capable(struct l2cap_chan *chan)
1034
{
1035
	struct l2cap_conn *conn = chan->conn;
1036
	struct hci_dev *hdev;
1037 1038 1039 1040 1041 1042 1043
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;
1044 1045

	read_lock(&hci_dev_list_lock);
1046
	list_for_each_entry(hdev, &hci_dev_list, list) {
1047
		if (hdev->amp_type != AMP_TYPE_BREDR &&
1048 1049 1050 1051 1052
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
1053 1054
	read_unlock(&hci_dev_list_lock);

1055 1056
	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;
1057 1058

	return false;
1059 1060
}

1061 1062 1063 1064 1065 1066
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

1067
void l2cap_send_conn_req(struct l2cap_chan *chan)
1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

1152 1153
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
1154
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1155 1156 1157
	chan->conf_state = 0;
	__clear_chan_timer(chan);

1158
	chan->state = BT_CONNECTED;
1159

1160
	chan->ops->ready(chan);
1161 1162
}

1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

1173
static void l2cap_do_start(struct l2cap_chan *chan)
1174
{
1175
	struct l2cap_conn *conn = chan->conn;
1176

1177 1178 1179 1180 1181
	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

1182
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1183 1184 1185
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

1186
		if (l2cap_chan_check_security(chan) &&
1187
		    __l2cap_no_conn_pending(chan)) {
1188 1189
			l2cap_start_connection(chan);
		}
1190 1191
	} else {
		struct l2cap_info_req req;
1192
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1193 1194 1195 1196

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

1197
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1198

1199 1200
		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
1201 1202 1203
	}
}

1204 1205 1206
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
1207
	if (!disable_ertm)
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

1220
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1221
{
1222
	struct l2cap_conn *conn = chan->conn;
1223 1224
	struct l2cap_disconn_req req;

1225 1226 1227
	if (!conn)
		return;

1228
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1229 1230 1231
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
1232 1233
	}

1234
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1235
		l2cap_state_change(chan, BT_DISCONN);
1236 1237 1238
		return;
	}

1239 1240
	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
1241 1242
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);
1243

1244
	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1245 1246
}

L
Linus Torvalds 已提交
1247
/* ---- L2CAP connections ---- */
1248 1249
static void l2cap_conn_start(struct l2cap_conn *conn)
{
1250
	struct l2cap_chan *chan, *tmp;
1251 1252 1253

	BT_DBG("conn %p", conn);

1254
	mutex_lock(&conn->chan_lock);
1255

1256
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1257
		l2cap_chan_lock(chan);
1258

1259
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1260
			l2cap_chan_unlock(chan);
1261 1262 1263
			continue;
		}

1264
		if (chan->state == BT_CONNECT) {
1265
			if (!l2cap_chan_check_security(chan) ||
1266
			    !__l2cap_no_conn_pending(chan)) {
1267
				l2cap_chan_unlock(chan);
1268 1269
				continue;
			}
1270

1271
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1272
			    && test_bit(CONF_STATE2_DEVICE,
1273
					&chan->conf_state)) {
1274
				l2cap_chan_close(chan, ECONNRESET);
1275
				l2cap_chan_unlock(chan);
1276
				continue;
1277
			}
1278

1279
			l2cap_start_connection(chan);
1280

1281
		} else if (chan->state == BT_CONNECT2) {
1282
			struct l2cap_conn_rsp rsp;
1283
			char buf[128];
1284 1285
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
1286

1287
			if (l2cap_chan_check_security(chan)) {
1288
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1289 1290
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1291
					chan->ops->defer(chan);
1292 1293

				} else {
1294
					l2cap_state_change(chan, BT_CONFIG);
1295 1296
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1297
				}
1298
			} else {
1299 1300
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1301 1302
			}

1303
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1304
				       sizeof(rsp), &rsp);
1305

1306
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1307
			    rsp.result != L2CAP_CR_SUCCESS) {
1308
				l2cap_chan_unlock(chan);
1309 1310 1311
				continue;
			}

1312
			set_bit(CONF_REQ_SENT, &chan->conf_state);
1313
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1314
				       l2cap_build_conf_req(chan, buf), buf);
1315
			chan->num_conf_req++;
1316 1317
		}

1318
		l2cap_chan_unlock(chan);
1319 1320
	}

1321
	mutex_unlock(&conn->chan_lock);
1322 1323
}

1324
/* Find socket with cid and source/destination bdaddr.
1325 1326
 * Returns closest match, locked.
 */
1327
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1328 1329
						    bdaddr_t *src,
						    bdaddr_t *dst)
1330
{
1331
	struct l2cap_chan *c, *c1 = NULL;
1332

1333
	read_lock(&chan_list_lock);
1334

1335
	list_for_each_entry(c, &chan_list, global_l) {
1336
		if (state && c->state != state)
1337 1338
			continue;

1339
		if (c->scid == cid) {
1340 1341 1342
			int src_match, dst_match;
			int src_any, dst_any;

1343
			/* Exact match. */
1344 1345
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
1346
			if (src_match && dst_match) {
1347 1348 1349
				read_unlock(&chan_list_lock);
				return c;
			}
1350 1351

			/* Closest match */
1352 1353
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1354 1355
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
1356
				c1 = c;
1357 1358
		}
	}
1359

1360
	read_unlock(&chan_list_lock);
1361

1362
	return c1;
1363 1364 1365 1366
}

static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
1367
	struct hci_conn *hcon = conn->hcon;
1368
	struct l2cap_chan *chan, *pchan;
1369
	u8 dst_type;
1370 1371 1372 1373

	BT_DBG("");

	/* Check if we have socket listening on cid */
1374
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1375
					  &hcon->src, &hcon->dst);
1376
	if (!pchan)
1377 1378
		return;

1379 1380 1381 1382
	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

1383 1384 1385 1386 1387 1388
	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

1389
	l2cap_chan_lock(pchan);
1390

1391
	chan = pchan->ops->new_connection(pchan);
1392
	if (!chan)
1393 1394
		goto clean;

1395 1396
	chan->dcid = L2CAP_CID_ATT;

1397 1398 1399 1400
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;
1401

1402
	__l2cap_chan_add(conn, chan);
1403

1404
clean:
1405
	l2cap_chan_unlock(pchan);
1406 1407
}

1408 1409
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
1410
	struct l2cap_chan *chan;
1411
	struct hci_conn *hcon = conn->hcon;
1412

1413
	BT_DBG("conn %p", conn);
1414

1415 1416 1417
	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
1418 1419
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);
1420

1421
	mutex_lock(&conn->chan_lock);
1422

1423 1424 1425
	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

1426
	list_for_each_entry(chan, &conn->chan_l, list) {
1427

1428
		l2cap_chan_lock(chan);
1429

1430 1431 1432 1433 1434
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

1435 1436
		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
1437
				l2cap_chan_ready(chan);
1438

1439
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1440
			l2cap_chan_ready(chan);
1441

1442
		} else if (chan->state == BT_CONNECT) {
1443
			l2cap_do_start(chan);
1444
		}
1445

1446
		l2cap_chan_unlock(chan);
1447
	}
1448

1449
	mutex_unlock(&conn->chan_lock);
1450 1451 1452 1453 1454
}

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
1455
	struct l2cap_chan *chan;
1456 1457 1458

	BT_DBG("conn %p", conn);

1459
	mutex_lock(&conn->chan_lock);
1460

1461
	list_for_each_entry(chan, &conn->chan_l, list) {
1462
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1463
			l2cap_chan_set_err(chan, err);
1464 1465
	}

1466
	mutex_unlock(&conn->chan_lock);
1467 1468
}

1469
static void l2cap_info_timeout(struct work_struct *work)
1470
{
1471
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1472
					       info_timer.work);
1473

1474
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1475
	conn->info_ident = 0;
1476

1477 1478 1479
	l2cap_conn_start(conn);
}

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);

static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}

1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

1575 1576
	l2cap_unregister_all_users(conn);

1577 1578
	mutex_lock(&conn->chan_lock);

1579 1580
	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1581
		l2cap_chan_hold(chan);
1582 1583
		l2cap_chan_lock(chan);

1584
		l2cap_chan_del(chan, err);
1585 1586 1587

		l2cap_chan_unlock(chan);

1588
		chan->ops->close(chan);
1589
		l2cap_chan_put(chan);
1590 1591
	}

1592 1593
	mutex_unlock(&conn->chan_lock);

1594 1595
	hci_chan_del(conn->hchan);

1596
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1597
		cancel_delayed_work_sync(&conn->info_timer);
1598

1599
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1600
		cancel_delayed_work_sync(&conn->security_timer);
1601
		smp_chan_destroy(conn);
1602
	}
1603 1604

	hcon->l2cap_data = NULL;
1605 1606
	conn->hchan = NULL;
	l2cap_conn_put(conn);
1607 1608
}

1609
static void security_timeout(struct work_struct *work)
1610
{
1611
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1612
					       security_timer.work);
1613

1614 1615 1616 1617 1618 1619
	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
1620 1621
}

1622
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
L
Linus Torvalds 已提交
1623
{
1624
	struct l2cap_conn *conn = hcon->l2cap_data;
1625
	struct hci_chan *hchan;
L
Linus Torvalds 已提交
1626

1627
	if (conn)
L
Linus Torvalds 已提交
1628 1629
		return conn;

1630 1631 1632 1633
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

1634
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1635 1636
	if (!conn) {
		hci_chan_del(hchan);
L
Linus Torvalds 已提交
1637
		return NULL;
1638
	}
L
Linus Torvalds 已提交
1639

1640
	kref_init(&conn->ref);
L
Linus Torvalds 已提交
1641 1642
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
1643
	hci_conn_get(conn->hcon);
1644
	conn->hchan = hchan;
L
Linus Torvalds 已提交
1645

1646
	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1647

1648 1649 1650 1651 1652 1653 1654 1655
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
1656
		conn->mtu = hcon->hdev->acl_mtu;
1657 1658
		break;
	}
1659

1660 1661
	conn->feat_mask = 0;

1662 1663 1664 1665
	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
1667
	mutex_init(&conn->chan_lock);
1668 1669

	INIT_LIST_HEAD(&conn->chan_l);
1670
	INIT_LIST_HEAD(&conn->users);

1672
	if (hcon->type == LE_LINK)
1673
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1674
	else
1675
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

1677
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1678

	return conn;
}

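/* l2cap_conn reference counting: the structure is freed once the last
 * l2cap_conn_put() drops the kref to zero, which also releases the
 * hci_conn reference taken in l2cap_conn_add().
 */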
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);

/* ---- Socket interface ---- */

1704
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
1707 1708
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
1709 1710
						   bdaddr_t *dst,
						   u8 link_type)
{
1712
	struct l2cap_chan *c, *c1 = NULL;

1714
	read_lock(&chan_list_lock);
1715

1716
	list_for_each_entry(c, &chan_list, global_l) {
1717
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

1726
		if (c->psm == psm) {
1727 1728 1729
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
1731 1732
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
1733
			if (src_match && dst_match) {
1734
				read_unlock(&chan_list_lock);
1735 1736
				return c;
			}

			/* Closest match */
1739 1740
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1741 1742
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
1743
				c1 = c;
		}
	}

1747
	read_unlock(&chan_list_lock);
1748

1749
	return c1;
}

1752 1753
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
1758
	__u8 auth_type;
1759
	int err;

1761
	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1762
	       dst_type, __le16_to_cpu(psm));

1764
	hdev = hci_get_route(dst, &chan->src);
1765
	if (!hdev)
		return -EHOSTUNREACH;

1768
	hci_dev_lock(hdev);

1770
	l2cap_chan_lock(chan);
1771 1772 1773

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1774
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

1797
	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
1821
	bacpy(&chan->dst, dst);
1822
	chan->dst_type = dst_type;
1823

1824 1825
	chan->psm = psm;
	chan->dcid = cid;

1827
	auth_type = l2cap_get_auth_type(chan);
1828

1829
	if (bdaddr_type_is_le(dst_type))
1830
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1831
				   chan->sec_level, auth_type);
1832
	else
1833
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1834
				   chan->sec_level, auth_type);
1835

1836 1837
	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
1839
	}

1841
	conn = l2cap_conn_add(hcon);
	if (!conn) {
1843
		hci_conn_drop(hcon);
1844
		err = -ENOMEM;
		goto done;
	}

	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
1852 1853
	}

	/* Update source addr of the socket */
1855
	bacpy(&chan->src, &hcon->src);
1856
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

1858
	l2cap_chan_unlock(chan);
1859
	l2cap_chan_add(conn, chan);
1860
	l2cap_chan_lock(chan);
1861

1862 1863 1864
	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

1865
	l2cap_state_change(chan, BT_CONNECT);
1866
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	if (hcon->state == BT_CONNECTED) {
1869
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1870
			__clear_chan_timer(chan);
1871
			if (l2cap_chan_check_security(chan))
1872
				l2cap_state_change(chan, BT_CONNECTED);
1873
		} else
1874
			l2cap_do_start(chan);
	}

1877 1878
	err = 0;

done:
1880
	l2cap_chan_unlock(chan);
1881
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}

1886
static void l2cap_monitor_timeout(struct work_struct *work)
1887
{
1888
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1889
					       monitor_timer.work);
1890

1891
	BT_DBG("chan %p", chan);
1892

1893 1894
	l2cap_chan_lock(chan);

1895
	if (!chan->conn) {
1896
		l2cap_chan_unlock(chan);
1897
		l2cap_chan_put(chan);
1898 1899 1900
		return;
	}

1901
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1902

1903
	l2cap_chan_unlock(chan);
1904
	l2cap_chan_put(chan);
1905 1906
}

1907
static void l2cap_retrans_timeout(struct work_struct *work)
1908
{
1909
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1910
					       retrans_timer.work);
1911

1912
	BT_DBG("chan %p", chan);
1913

1914 1915
	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}
1921

1922
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1923
	l2cap_chan_unlock(chan);
1924
	l2cap_chan_put(chan);
1925 1926
}

1927 1928
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
1929
{
1930
	struct sk_buff *skb;
1931
	struct l2cap_ctrl *control;
1932

1933 1934
	BT_DBG("chan %p, skbs %p", chan, skbs);

1935 1936 1937
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);
1951

1952
		if (chan->fcs == L2CAP_FCS_CRC16) {
1953 1954
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1955 1956
		}

1957
		l2cap_do_send(chan, skb);
1958

1959
		BT_DBG("Sent txseq %u", control->txseq);
1960

1961
		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1962
		chan->frames_sent++;
1963 1964 1965
	}
}

1966
static int l2cap_ertm_send(struct l2cap_chan *chan)
1967 1968
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);
1973

1974
	if (chan->state != BT_CONNECTED)
1975
		return -ENOTCONN;
1976

1977 1978 1979
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

1980 1981 1982
	if (__chan_is_moving(chan))
		return 0;

1983 1984 1985
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1986

1987
		skb = chan->tx_send_head;
1988

1989 1990
		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;
1991

1992
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1993
			control->final = 1;
1994

1995 1996 1997
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;
1998

1999
		__pack_control(chan, control, skb);
2000

2001
		if (chan->fcs == L2CAP_FCS_CRC16) {
2002 2003
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2004 2005
		}

2006 2007 2008 2009
		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);
2010

2011 2012
		if (!tx_skb)
			break;
2013

2014
		__set_retrans_timer(chan);
2015 2016

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2017
		chan->unacked_frames++;
2018
		chan->frames_sent++;
2019
		sent++;
2020

2021 2022
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
2023
		else
2024
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2025 2026

		l2cap_do_send(chan, tx_skb);
2027
		BT_DBG("Sent txseq %u", control->txseq);
2028 2029
	}

2030 2031
	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2032 2033

	return sent;
2034 2035
}
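
/* Retransmit the sequence numbers queued on retrans_list.  A cached
 * frame that is still cloned is copied to get a writable buffer, the
 * retry count is checked against max_tx, and the control field and FCS
 * are rebuilt before the frame is sent again.
 */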

static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

2158
static void l2cap_send_ack(struct l2cap_chan *chan)
2159
{
2160 2161 2162 2163
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;
2164

2165 2166
	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);
2167

2168 2169
	memset(&control, 0, sizeof(control));
	control.sframe = 1;
2170

2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}
2184

2185
		/* Ack now if the window is 3/4ths full.
2186 2187
		 * Calculate without mul or div
		 */
2188
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;
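		/* threshold ends up as floor(3 * ack_win / 4); e.g. with the
		 * default tx_win of 63 an ack goes out once 47 frames are
		 * pending (illustrative figure, the real window is whatever
		 * was negotiated).
		 */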

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2193 2194 2195 2196 2197 2198 2199 2200 2201
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}
2202

2203 2204 2205
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
2206 2207
}

2208 2209 2210
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
2211
{
2212
	struct l2cap_conn *conn = chan->conn;
2213
	struct sk_buff **frag;
2214
	int sent = 0;

2216
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2217
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
2225 2226
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

2229 2230 2231 2232 2233 2234
		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;
2235

2236 2237
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

2239 2240
		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

2244 2245 2246
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
2251
}

2253
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2254 2255
						 struct msghdr *msg, size_t len,
						 u32 priority)
2256
{
2257
	struct l2cap_conn *conn = chan->conn;
2258
	struct sk_buff *skb;
2259
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2260 2261
	struct l2cap_hdr *lh;

2262 2263
	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);
2264 2265

	count = min_t(unsigned int, (conn->mtu - hlen), len);
2266 2267

	skb = chan->ops->alloc_skb(chan, count + hlen,
2268 2269 2270
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;
2271

2272 2273
	skb->priority = priority;

2274 2275
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2276
	lh->cid = cpu_to_le16(chan->dcid);
2277
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2278
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2279

2280
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2281 2282 2283 2284 2285 2286 2287
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

2288
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2289 2290
					      struct msghdr *msg, size_t len,
					      u32 priority)
2291
{
2292
	struct l2cap_conn *conn = chan->conn;
2293
	struct sk_buff *skb;
2294
	int err, count;
2295 2296
	struct l2cap_hdr *lh;

2297
	BT_DBG("chan %p len %zu", chan, len);
2298

2299
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2300

2301
	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2302 2303 2304
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;
2305

2306 2307
	skb->priority = priority;

2308 2309
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2310
	lh->cid = cpu_to_le16(chan->dcid);
2311
	lh->len = cpu_to_le16(len);
2312

2313
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2314 2315 2316 2317 2318 2319 2320
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

2321
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2322 2323
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
2324
{
2325
	struct l2cap_conn *conn = chan->conn;
2326
	struct sk_buff *skb;
2327
	int err, count, hlen;
2328 2329
	struct l2cap_hdr *lh;

2330
	BT_DBG("chan %p len %zu", chan, len);
2331

2332 2333 2334
	if (!conn)
		return ERR_PTR(-ENOTCONN);

2335
	hlen = __ertm_hdr_size(chan);
2336

2337
	if (sdulen)
2338
		hlen += L2CAP_SDULEN_SIZE;
2339

2340
	if (chan->fcs == L2CAP_FCS_CRC16)
2341
		hlen += L2CAP_FCS_SIZE;
2342

2343
	count = min_t(unsigned int, (conn->mtu - hlen), len);
2344 2345

	skb = chan->ops->alloc_skb(chan, count + hlen,
2346 2347 2348
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;
2349 2350 2351

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2352
	lh->cid = cpu_to_le16(chan->dcid);
2353
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2354

2355 2356 2357 2358 2359
	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2360

2361
	if (sdulen)
2362
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2363

2364
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2365 2366 2367 2368
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
2369

2370
	bt_cb(skb)->control.fcs = chan->fcs;
2371
	bt_cb(skb)->control.retries = 0;
2372
	return skb;
}
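
/* Segment an outgoing SDU into I-frame PDUs.  The PDU payload is sized
 * so that a complete frame (headers, optional SDU length and FCS) fits
 * in a single HCI fragment and within the remote MPS; the first PDU of a
 * segmented SDU carries SAR_START plus the total SDU length, the last
 * one carries SAR_END.
 */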

static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
2378 2379
{
	struct sk_buff *skb;
2380 2381 2382
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;
2383

2384
	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2385

2386 2387 2388 2389
	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */
2390

2391 2392
	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;
2393

2394 2395 2396
	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2397 2398

	/* Adjust for largest possible L2CAP overhead. */
2399 2400 2401
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

2402
	pdu_len -= __ertm_hdr_size(chan);
2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2419 2420

		if (IS_ERR(skb)) {
2421
			__skb_queue_purge(seg_queue);
2422 2423 2424
			return PTR_ERR(skb);
		}

2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
2440 2441
	}

2442
	return 0;
2443 2444
}
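
/* Entry point for outgoing data on a channel.  Connectionless channels
 * get a single connless PDU, basic mode builds one PDU after an MTU
 * check, and ERTM/streaming channels segment the SDU first and then hand
 * the fragments to the transmit state machine or the streaming sender.
 */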

int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2446
		    u32 priority)
2447 2448 2449
{
	struct sk_buff *skb;
	int err;
2450
	struct sk_buff_head seg_queue;
2451

2452 2453 2454
	if (!chan->conn)
		return -ENOTCONN;

2455
	/* Connectionless channel */
2456
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2457
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
2472
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2473 2474 2475 2476 2477 2478 2479 2480 2481
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
2482 2483 2484 2485 2486
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}
2487

2488
		__skb_queue_head_init(&seg_queue);
2489

2490 2491 2492 2493 2494
		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2495

2496 2497 2498 2499 2500 2501
		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
2502 2503
		}

2504
		if (err)
2505 2506
			break;

2507
		if (chan->mode == L2CAP_MODE_ERTM)
2508
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2509
		else
2510
			l2cap_streaming_send(chan, &seg_queue);
2511

2512
		err = len;
2513

2514 2515 2516 2517
		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
2518 2519 2520 2521 2522 2523 2524 2525 2526 2527
		break;

	default:
		BT_DBG("bad mode %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
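
/* Receiver-side SREJ helpers: request retransmission of every sequence
 * number between expected_tx_seq and the txseq that actually arrived,
 * tracking the outstanding requests on srej_list.
 */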

static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
2530 2531 2532
	struct l2cap_ctrl control;
	u16 seq;

2533
	BT_DBG("chan %p, txseq %u", chan, txseq);
2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
2549 2550 2551 2552
}

static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
2565 2566 2567 2568
}

static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
2569 2570 2571 2572
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

2573
	BT_DBG("chan %p, txseq %u", chan, txseq);
2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
2591 2592
}
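
/* Process an incoming acknowledgement: drop every cached frame from
 * expected_ack_seq up to (but not including) reqseq from tx_q and stop
 * the retransmission timer once nothing is left unacked.
 */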

static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

2598
	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2599 2600 2601 2602

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

2603
	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

2622
	BT_DBG("unacked_frames %u", chan->unacked_frames);
2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634
}

static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
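
/* ERTM transmitter state machine.  XMIT is the normal sending state;
 * WAIT_F is entered once a poll (P=1) has been sent, and new data is
 * queued but not sent until the matching final (F=1) response arrives or
 * the monitor timer gives up.
 */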

static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
2676
			l2cap_send_sframe(chan, &local_control);
2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}

2707 2708 2709
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
2746
			l2cap_send_sframe(chan, &local_control);
2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
2777
			l2cap_send_disconn_req(chan, ECONNABORTED);
2778 2779 2780 2781 2782 2783 2784
		}
		break;
	default:
		break;
	}
}

2785 2786
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
2787 2788 2789 2790 2791 2792
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
2793
		l2cap_tx_state_xmit(chan, control, skbs, event);
2794 2795
		break;
	case L2CAP_TX_STATE_WAIT_F:
2796
		l2cap_tx_state_wait_f(chan, control, skbs, event);
2797 2798 2799 2800 2801 2802 2803
		break;
	default:
		/* Ignore event */
		break;
	}
}

2804 2805 2806 2807
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
2808
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2809 2810
}

2811 2812 2813 2814
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
2815
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2816 2817
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
2822
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

2826
	mutex_lock(&conn->chan_lock);
2827

2828
	list_for_each_entry(chan, &conn->chan_l, list) {
2829
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

2832 2833
		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;
2835

2836
		nskb = skb_clone(skb, GFP_KERNEL);
2837
		if (!nskb)
			continue;
2839
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}
2842

2843
	mutex_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */
2847 2848
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

2855 2856
	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

2858 2859 2860
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

2864
	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2869
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2870 2871

	if (conn->hcon->type == LE_LINK)
2872
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2873
	else
2874
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
2879
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

2894
		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
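
/* Configuration options are encoded as type/length/value triplets.  As
 * an illustration (bytes not taken from this file), an MTU option
 * carrying the default MTU of 672 (0x02a0) is encoded little-endian as
 * 01 02 a0 02: type 0x01 (MTU), length 2, value 0x02a0.
 */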

static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
2931
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
2935
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

2943
	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

2951
	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
2962
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
2966
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

2977 2978 2979 2980
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

2981
	switch (chan->mode) {
2982 2983 2984 2985 2986
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2987
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2988
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3005
			   (unsigned long) &efs);
3006 3007
}

3008
static void l2cap_ack_timeout(struct work_struct *work)
3009
{
3010
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3011 3012
					       ack_timer.work);
	u16 frames_to_ack;
3013

3014 3015
	BT_DBG("chan %p", chan);

3016 3017
	l2cap_chan_lock(chan);

3018 3019
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);
3020

3021 3022
	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);
3023

3024
	l2cap_chan_unlock(chan);
3025
	l2cap_chan_put(chan);
3026 3027
}

3028
int l2cap_ertm_init(struct l2cap_chan *chan)
3029
{
3030 3031
	int err;

3032 3033
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
3034
	chan->expected_ack_seq = 0;
3035
	chan->unacked_frames = 0;
3036
	chan->buffer_seq = 0;
3037
	chan->frames_sent = 0;
3038 3039 3040 3041 3042
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

3043 3044
	skb_queue_head_init(&chan->tx_q);

3045 3046
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
3047 3048 3049
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

3050 3051 3052 3053 3054
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;
3055

3056 3057 3058
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3059

3060
	skb_queue_head_init(&chan->srej_q);
3061

3062 3063 3064 3065
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

3066 3067 3068 3069 3070
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
3071 3072
}

3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

3086
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3087
{
3088
	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3089 3090
}

3091
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3092
{
3093
	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3094 3095
}

3096 3097 3098
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
3099
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;
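		/* For example, an AMP best-effort flush timeout of 10000
		 * microseconds gives 10 ms above, so ertm_to becomes 530 ms
		 * here (illustrative figure; real controllers report their
		 * own flush timeout).
		 */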

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}

3134 3135 3136
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3137
	    __l2cap_ews_supported(chan->conn)) {
3138 3139
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
3140 3141
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
3142
		chan->tx_win = min_t(u16, chan->tx_win,
3143
				     L2CAP_DEFAULT_TX_WINDOW);
3144 3145
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
3146
	chan->ack_win = chan->tx_win;
3147 3148
}
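
/* Build the outgoing Configuration Request for a channel: the MTU option
 * if it differs from the default, an RFC option describing the selected
 * mode (basic, ERTM or streaming), and FCS/EFS/EWS options when the
 * corresponding features are in use.
 */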

static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
3152
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
3154
	u16 size;

3156
	BT_DBG("chan %p", chan);

3158
	if (chan->num_conf_req || chan->num_conf_rsp)
3159 3160
		goto done;

3161
	switch (chan->mode) {
3162 3163
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
3164
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3165 3166
			break;

3167
		if (__l2cap_efs_supported(chan->conn))
3168 3169
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

3170
		/* fall through */
3171
	default:
3172
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3173 3174 3175 3176
		break;
	}

done:
3177 3178
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3179

3180
	switch (chan->mode) {
3181
	case L2CAP_MODE_BASIC:
3182
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3183
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3184 3185
			break;

3186 3187 3188 3189 3190 3191 3192
		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

3193
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3194
				   (unsigned long) &rfc);
3195 3196 3197 3198
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
3199
		rfc.max_transmit    = chan->max_tx;
3200 3201

		__l2cap_set_ertm_timeouts(chan, &rfc);
3202 3203

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3204 3205
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
3206
		rfc.max_pdu_size = cpu_to_le16(size);
3207

3208 3209 3210
		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
3211
				       L2CAP_DEFAULT_TX_WINDOW);
3212

3213
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3214
				   (unsigned long) &rfc);
3215

3216 3217 3218
		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

3219 3220
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3221
					   chan->tx_win);
3222 3223 3224

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
3225
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3226 3227 3228 3229
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
3230 3231 3232
		break;

	case L2CAP_MODE_STREAMING:
3233
		l2cap_txwin_setup(chan);
3234 3235 3236 3237 3238
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
3239 3240

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3241 3242
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
3243
		rfc.max_pdu_size = cpu_to_le16(size);
3244

3245
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3246
				   (unsigned long) &rfc);
3247

3248 3249 3250
		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

3251 3252
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
3253
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3254 3255 3256 3257
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
3258 3259
		break;
	}

3261
	req->dcid  = cpu_to_le16(chan->dcid);
3262
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
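
/* Parse the peer's Configuration Request (cached in chan->conf_req) and
 * build our Configuration Response: unknown non-hint options are
 * reported back, the requested MTU, RFC, FCS, EFS and EWS options are
 * checked against what this side supports, and the negotiated values are
 * written into the response.
 */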

static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
3269 3270
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
3271 3272
	void *req = chan->conf_req;
	int len = chan->conf_len;
3273 3274
	int type, hint, olen;
	unsigned long val;
3275
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3276 3277
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
3278
	u16 mtu = L2CAP_DEFAULT_MTU;
3279
	u16 result = L2CAP_CONF_SUCCESS;
3280
	u16 size;

3282
	BT_DBG("chan %p", chan);
3283

3284 3285
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

3287
		hint  = type & L2CAP_CONF_HINT;
3288
		type &= L2CAP_CONF_MASK;
3289 3290 3291

		switch (type) {
		case L2CAP_CONF_MTU:
3292
			mtu = val;
3293 3294 3295
			break;

		case L2CAP_CONF_FLUSH_TO:
3296
			chan->flush_to = val;
3297 3298 3299 3300 3301
			break;

		case L2CAP_CONF_QOS:
			break;

3302 3303 3304 3305 3306
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

3307 3308
		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
3309
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3310
			break;
3311

3312 3313 3314 3315
		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
3316 3317
			break;

3318
		case L2CAP_CONF_EWS:
3319
			if (!chan->conn->hs_enabled)
3320
				return -ECONNREFUSED;
3321

3322 3323
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
3324
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3325
			chan->remote_tx_win = val;
3326 3327
			break;

3328 3329 3330 3331 3332 3333 3334 3335 3336 3337
		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

3338
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3339 3340
		goto done;

3341
	switch (chan->mode) {
3342 3343
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
3344
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3345
			chan->mode = l2cap_select_mode(rfc.mode,
3346
						       chan->conn->feat_mask);
3347 3348 3349
			break;
		}

3350
		if (remote_efs) {
3351
			if (__l2cap_efs_supported(chan->conn))
3352 3353 3354 3355 3356
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

3357
		if (chan->mode != rfc.mode)
3358
			return -ECONNREFUSED;
3359

3360 3361 3362 3363
		break;
	}

done:
3364
	if (chan->mode != rfc.mode) {
3365
		result = L2CAP_CONF_UNACCEPT;
3366
		rfc.mode = chan->mode;
3367

3368
		if (chan->num_conf_rsp == 1)
3369 3370
			return -ECONNREFUSED;

3371 3372
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
3373 3374
	}

3375 3376 3377 3378
	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

3379 3380 3381
		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
3382
			chan->omtu = mtu;
3383
			set_bit(CONF_MTU_DONE, &chan->conf_state);
3384
		}
3385
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3386

3387 3388
		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3389 3390
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {
3391 3392 3393 3394 3395 3396 3397

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3398 3399
						   sizeof(efs),
						   (unsigned long) &efs);
3400
			} else {
3401
				/* Send PENDING Conf Rsp */
3402 3403
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3404 3405 3406
			}
		}

3407 3408
		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
3409
			chan->fcs = L2CAP_FCS_NONE;
3410
			set_bit(CONF_MODE_DONE, &chan->conf_state);
3411 3412 3413
			break;

		case L2CAP_MODE_ERTM:
3414 3415 3416 3417
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3418

3419
			chan->remote_max_tx = rfc.max_transmit;
3420

3421
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3422 3423
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3424 3425
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;
3426

3427
			__l2cap_set_ertm_timeouts(chan, &rfc);
3428

3429
			set_bit(CONF_MODE_DONE, &chan->conf_state);
3430 3431

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3432
					   sizeof(rfc), (unsigned long) &rfc);
3433

3434 3435 3436 3437 3438
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
3439
					le32_to_cpu(efs.flush_to);
3440
				chan->remote_acc_lat =
3441
					le32_to_cpu(efs.acc_lat);
3442 3443 3444
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3445 3446
						   sizeof(efs),
						   (unsigned long) &efs);
3447
			}
3448 3449 3450
			break;

		case L2CAP_MODE_STREAMING:
3451
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3452 3453
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3454 3455
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;
3456

3457
			set_bit(CONF_MODE_DONE, &chan->conf_state);
3458

3459 3460
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
3461

3462 3463 3464
			break;

		default:
3465 3466
			result = L2CAP_CONF_UNACCEPT;

3467
			memset(&rfc, 0, sizeof(rfc));
3468
			rfc.mode = chan->mode;
3469
		}
3470

3471
		if (result == L2CAP_CONF_SUCCESS)
3472
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3473
	}
3474
	rsp->scid   = cpu_to_le16(chan->dcid);
3475
	rsp->result = cpu_to_le16(result);
3476
	rsp->flags  = __constant_cpu_to_le16(0);
3477 3478

	return ptr - data;
L
Linus Torvalds 已提交
3479 3480
}

3481 3482
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
3483 3484 3485 3486 3487
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
3488
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3489
	struct l2cap_conf_efs efs;
3490

3491
	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3492 3493 3494 3495 3496 3497 3498 3499

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
3500
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3501
			} else
3502 3503
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3504 3505 3506
			break;

		case L2CAP_CONF_FLUSH_TO:
3507
			chan->flush_to = val;
3508
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3509
					   2, chan->flush_to);
3510 3511 3512 3513 3514 3515
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

3516
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3517
			    rfc.mode != chan->mode)
3518 3519
				return -ECONNREFUSED;

3520
			chan->fcs = 0;
3521 3522

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3523
					   sizeof(rfc), (unsigned long) &rfc);
3524
			break;
3525 3526

		case L2CAP_CONF_EWS:
3527
			chan->ack_win = min_t(u16, val, chan->ack_win);
3528
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3529
					   chan->tx_win);
3530
			break;
3531 3532 3533 3534 3535 3536

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3537 3538
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
3539 3540
				return -ECONNREFUSED;

3541 3542
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
3543
			break;
3544 3545 3546 3547

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
3548
					set_bit(CONF_RECV_NO_FCS,
3549 3550
						&chan->conf_state);
			break;
3551 3552 3553
		}
	}

3554
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3555 3556
		return -ECONNREFUSED;

3557
	chan->mode = rfc.mode;
3558

3559
	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3560 3561
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
3562 3563 3564
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3565 3566 3567
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);
3568 3569 3570 3571

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
3572
					le32_to_cpu(efs.sdu_itime);
3573 3574
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
3575
					le32_to_cpu(efs.flush_to);
3576
			}
3577
			break;
3578

3579
		case L2CAP_MODE_STREAMING:
3580
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3581 3582 3583
		}
	}

3584
	req->dcid   = cpu_to_le16(chan->dcid);
3585
	req->flags  = __constant_cpu_to_le16(0);
3586 3587 3588 3589

	return ptr - data;
}

3590 3591
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

3596
	BT_DBG("chan %p", chan);

3598
	rsp->scid   = cpu_to_le16(chan->dcid);
3599
	rsp->result = cpu_to_le16(result);
3600
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
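
/* Send the Connection Response (or Create Channel Response when an AMP
 * controller is involved) that was deferred while waiting for security
 * or user-space authorization, then kick off configuration by sending
 * our Configuration Request.
 */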

void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3606 3607
{
	struct l2cap_conn_rsp rsp;
3608
	struct l2cap_conn *conn = chan->conn;
3609
	u8 buf[128];
3610
	u8 rsp_code;
3611

3612 3613
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
3614 3615
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3616 3617 3618 3619 3620 3621 3622 3623 3624

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3625

3626
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3627 3628 3629
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3630
		       l2cap_build_conf_req(chan, buf), buf);
3631 3632 3633
	chan->num_conf_req++;
}

3634
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3635 3636 3637
{
	int type, olen;
	unsigned long val;
3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};
3649

3650
	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3651

3652
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3653 3654 3655 3656 3657
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

3658 3659 3660 3661
		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
3662
			break;
3663 3664 3665 3666
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
3667 3668 3669 3670
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
3671 3672
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3673 3674 3675 3676 3677 3678
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
3679 3680
		break;
	case L2CAP_MODE_STREAMING:
3681
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3682 3683 3684
	}
}

3685
static inline int l2cap_command_rej(struct l2cap_conn *conn,
3686 3687
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
3688
{
3689
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3690

3691 3692 3693
	if (cmd_len < sizeof(*rej))
		return -EPROTO;

3694
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3695 3696 3697
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3698
	    cmd->ident == conn->info_ident) {
3699
		cancel_delayed_work(&conn->info_timer);
3700 3701

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3702
		conn->info_ident = 0;
3703

3704 3705 3706 3707 3708 3709
		l2cap_conn_start(conn);
	}

	return 0;
}
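
/* Handle an incoming Connection Request: look up a listening channel for
 * the requested PSM, verify the link security level, allocate the new
 * channel and reply with success, pending or a rejection.  A pending
 * response also triggers an information request for the remote feature
 * mask if we do not have it yet.
 */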

static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
3716
	struct l2cap_chan *chan = NULL, *pchan;
3717
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3720
	__le16 psm = req->psm;

3722
	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
3725
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3726
					 &conn->hcon->dst, ACL_LINK);
3727
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

3732
	mutex_lock(&conn->chan_lock);
3733
	l2cap_chan_lock(pchan);
3734

3735
	/* Check if the ACL is secure enough (if not SDP) */
3736
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3737
	    !hci_conn_check_link_mode(conn->hcon)) {
3738
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3739 3740 3741 3742
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

3745 3746 3747 3748
	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

3749
	chan = pchan->ops->new_connection(pchan);
3750
	if (!chan)
		goto response;

3753 3754 3755 3756 3757 3758 3759
	/* For certain devices (e.g. HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, in order to
	 * avoid keeping the ACL alive for too long after L2CAP disconnection,
	 * reset the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during
	 * L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

3760 3761
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
3762 3763
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3764 3765
	chan->psm  = psm;
	chan->dcid = scid;
3766
	chan->local_amp_id = amp_id;

3768
	__l2cap_chan_add(conn, chan);
3769

3770
	dcid = chan->scid;

3772
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

3774
	chan->ident = cmd->ident;

3776
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3777
		if (l2cap_chan_check_security(chan)) {
3778
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3779
				l2cap_state_change(chan, BT_CONNECT2);
3780 3781
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
3782
				chan->ops->defer(chan);
3783
			} else {
3784 3785 3786 3787
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
3788
				if (amp_id == AMP_ID_BREDR) {
3789
					l2cap_state_change(chan, BT_CONFIG);
3790
					result = L2CAP_CR_SUCCESS;
3791
				} else {
3792
					l2cap_state_change(chan, BT_CONNECT2);
3793
					result = L2CAP_CR_PEND;
3794
				}
3795 3796
				status = L2CAP_CS_NO_INFO;
			}
3797
		} else {
3798
			l2cap_state_change(chan, BT_CONNECT2);
3799 3800 3801 3802
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
3803
		l2cap_state_change(chan, BT_CONNECT2);
3804 3805
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
3809
	l2cap_chan_unlock(pchan);
3810
	mutex_unlock(&conn->chan_lock);

sendresp:
3813 3814 3815 3816
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
3817
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3818 3819 3820

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
3821
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3822 3823 3824 3825

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

3826
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3827

3828 3829
		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
3830 3831
	}

3832
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3833
	    result == L2CAP_CR_SUCCESS) {
3834
		u8 buf[128];
3835
		set_bit(CONF_REQ_SENT, &chan->conf_state);
3836
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3837
			       l2cap_build_conf_req(chan, buf), buf);
3838
		chan->num_conf_req++;
3839
	}
3840 3841

	return chan;
3842
}
3843

3844
static int l2cap_connect_req(struct l2cap_conn *conn,
3845
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3846
{
3847 3848 3849
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

3850 3851 3852
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

3853 3854 3855 3856 3857 3858 3859 3860
	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

3861
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
L
Linus Torvalds 已提交
3862 3863 3864
	return 0;
}

3865
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3866 3867
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
L
Linus Torvalds 已提交
3868 3869 3870
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
3871
	struct l2cap_chan *chan;
L
Linus Torvalds 已提交
3872
	u8 req[128];
3873
	int err;
L
Linus Torvalds 已提交
3874

3875 3876 3877
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

L
Linus Torvalds 已提交
3878 3879 3880 3881 3882
	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

3883
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

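/* Send a successful Configure Response for an EFS exchange: local
 * configuration output is marked done and the response is sent with the
 * given ident and flags.
 */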
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}

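/* Answer a signaling command that referenced an unknown channel with a
 * Command Reject (invalid CID), echoing the offending source and
 * destination CIDs.
 */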
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

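/* Handle an incoming Configure Request.  Option data may be split over
 * several requests using the continuation flag; fragments are collected
 * in chan->conf_req and parsed only once the final fragment has arrived.
 */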
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
L
Linus Torvalds 已提交
3980 3981 3982 3983
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
3984
	struct l2cap_chan *chan;
3985
	int len, err = 0;
L
Linus Torvalds 已提交
3986

3987 3988 3989
	if (cmd_len < sizeof(*req))
		return -EPROTO;

L
Linus Torvalds 已提交
3990 3991 3992 3993 3994
	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

3995
	chan = l2cap_get_chan_by_scid(conn, dcid);
3996 3997 3998 3999
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}
L
Linus Torvalds 已提交
4000

4001
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4002 4003
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
4004
		goto unlock;
4005
	}
4006

4007
	/* Reject if config buffer is too small. */
4008
	len = cmd_len - sizeof(*req);
4009
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4010
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4011 4012
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
4013 4014 4015 4016
		goto unlock;
	}

	/* Store config. */
4017 4018
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;
L
Linus Torvalds 已提交
4019

4020
	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
L
Linus Torvalds 已提交
4021 4022
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4023 4024
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
L
Linus Torvalds 已提交
4025 4026 4027 4028
		goto unlock;
	}

	/* Complete config. */
4029
	len = l2cap_parse_conf_req(chan, rsp);
4030
	if (len < 0) {
4031
		l2cap_send_disconn_req(chan, ECONNRESET);
L
Linus Torvalds 已提交
4032
		goto unlock;
4033
	}
L
Linus Torvalds 已提交
4034

4035
	chan->ident = cmd->ident;
4036
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4037
	chan->num_conf_rsp++;
4038 4039

	/* Reset config buffer. */
4040
	chan->conf_len = 0;
4041

4042
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4043 4044
		goto unlock;

4045
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4046
		set_default_fcs(chan);
4047

4048 4049
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
4050 4051 4052
			err = l2cap_ertm_init(chan);

		if (err < 0)
4053
			l2cap_send_disconn_req(chan, -err);
4054 4055
		else
			l2cap_chan_ready(chan);
4056

4057 4058 4059
		goto unlock;
	}

4060
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4061
		u8 buf[64];
L
Linus Torvalds 已提交
4062
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4063
			       l2cap_build_conf_req(chan, buf), buf);
4064
		chan->num_conf_req++;
L
Linus Torvalds 已提交
4065 4066
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4070
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4071 4072 4073

		/* check compatibility */

4074
		/* Send rsp for BR/EDR channel */
4075
		if (!chan->hs_hcon)
4076 4077 4078
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
4079 4080
	}

L
Linus Torvalds 已提交
4081
unlock:
4082
	l2cap_chan_unlock(chan);
4083
	return err;
L
Linus Torvalds 已提交
4084 4085
}

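/* Handle a Configure Response.  SUCCESS and PENDING keep the negotiation
 * going, UNACCEPT triggers a re-negotiated request while under the
 * L2CAP_CONF_MAX_CONF_RSP limit, and any other result tears the channel
 * down.
 */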
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4087 4088
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
L
Linus Torvalds 已提交
4089 4090 4091
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
4092
	struct l2cap_chan *chan;
4093
	int len = cmd_len - sizeof(*rsp);
4094
	int err = 0;
L
Linus Torvalds 已提交
4095

4096 4097 4098
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

L
Linus Torvalds 已提交
4099 4100 4101 4102
	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

4103 4104
	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);
L
Linus Torvalds 已提交
4105

4106
	chan = l2cap_get_chan_by_scid(conn, scid);
4107
	if (!chan)
L
Linus Torvalds 已提交
4108 4109 4110 4111
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
4112
		l2cap_conf_rfc_get(chan, rsp->data, len);
4113
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
L
Linus Torvalds 已提交
4114 4115
		break;

4116 4117 4118 4119 4120 4121 4122
	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4123
						   buf, &result);
4124
			if (len < 0) {
4125
				l2cap_send_disconn_req(chan, ECONNRESET);
4126 4127 4128
				goto done;
			}

4129
			if (!chan->hs_hcon) {
4130 4131
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
4132 4133 4134 4135 4136 4137
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
4138 4139 4140
		}
		goto done;

L
Linus Torvalds 已提交
4141
	case L2CAP_CONF_UNACCEPT:
4142
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4143 4144
			char req[64];

4145
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4146
				l2cap_send_disconn_req(chan, ECONNRESET);
4147 4148 4149
				goto done;
			}

4150 4151
			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
4152
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4153
						   req, &result);
4154
			if (len < 0) {
4155
				l2cap_send_disconn_req(chan, ECONNRESET);
4156 4157 4158 4159
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4160
				       L2CAP_CONF_REQ, len, req);
4161
			chan->num_conf_req++;
4162 4163 4164
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
L
Linus Torvalds 已提交
4165 4166
		}

4167
	default:
4168
		l2cap_chan_set_err(chan, ECONNRESET);
4169

4170
		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4171
		l2cap_send_disconn_req(chan, ECONNRESET);
L
Linus Torvalds 已提交
4172 4173 4174
		goto done;
	}

4175
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
L
Linus Torvalds 已提交
4176 4177
		goto done;

4178
	set_bit(CONF_INPUT_DONE, &chan->conf_state);
L
Linus Torvalds 已提交
4179

4180
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4181
		set_default_fcs(chan);
4182

4183 4184
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
4185
			err = l2cap_ertm_init(chan);
4186

4187
		if (err < 0)
4188
			l2cap_send_disconn_req(chan, -err);
4189 4190
		else
			l2cap_chan_ready(chan);
L
Linus Torvalds 已提交
4191 4192 4193
	}

done:
4194
	l2cap_chan_unlock(chan);
4195
	return err;
L
Linus Torvalds 已提交
4196 4197
}

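/* Handle a Disconnection Request: send the Disconnection Response, then
 * shut the channel down and free it.
 */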
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4199 4200
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
L
Linus Torvalds 已提交
4201 4202 4203 4204
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
4205
	struct l2cap_chan *chan;
L
Linus Torvalds 已提交
4206

4207 4208 4209
	if (cmd_len != sizeof(*req))
		return -EPROTO;

L
Linus Torvalds 已提交
4210 4211 4212 4213 4214
	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

4215 4216 4217 4218 4219
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
4220 4221
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
4222
	}
L
Linus Torvalds 已提交
4223

4224 4225
	l2cap_chan_lock(chan);

4226 4227
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
L
Linus Torvalds 已提交
4228 4229
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

4230
	chan->ops->set_shutdown(chan);
L
Linus Torvalds 已提交
4231

4232
	l2cap_chan_hold(chan);
4233
	l2cap_chan_del(chan, ECONNRESET);
4234 4235

	l2cap_chan_unlock(chan);
L
Linus Torvalds 已提交
4236

4237
	chan->ops->close(chan);
4238
	l2cap_chan_put(chan);
4239 4240 4241

	mutex_unlock(&conn->chan_lock);

L
Linus Torvalds 已提交
4242 4243 4244
	return 0;
}

4245
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4246 4247
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
L
Linus Torvalds 已提交
4248 4249 4250
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
4251
	struct l2cap_chan *chan;
L
Linus Torvalds 已提交
4252

4253 4254 4255
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

L
Linus Torvalds 已提交
4256 4257 4258 4259 4260
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

4261 4262 4263 4264 4265
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
L
Linus Torvalds 已提交
4266
		return 0;
4267
	}
L
Linus Torvalds 已提交
4268

4269
	l2cap_chan_lock(chan);
4270

4271
	l2cap_chan_hold(chan);
4272
	l2cap_chan_del(chan, 0);
4273 4274

	l2cap_chan_unlock(chan);
L
Linus Torvalds 已提交
4275

4276
	chan->ops->close(chan);
4277
	l2cap_chan_put(chan);
4278 4279 4280

	mutex_unlock(&conn->chan_lock);

L
Linus Torvalds 已提交
4281 4282 4283
	return 0;
}

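/* Answer an Information Request.  Only the feature mask and the fixed
 * channel map are supported; any other type gets a "not supported"
 * response.
 */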
static inline int l2cap_information_req(struct l2cap_conn *conn,
4285 4286
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
L
Linus Torvalds 已提交
4287 4288 4289 4290
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

4291 4292 4293
	if (cmd_len != sizeof(*req))
		return -EPROTO;

L
Linus Torvalds 已提交
4294 4295 4296 4297
	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

4298 4299
	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
4300
		u32 feat_mask = l2cap_feat_mask;
4301
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4302 4303
		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4304
		if (!disable_ertm)
4305
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4306
				| L2CAP_FEAT_FCS;
4307
		if (conn->hs_enabled)
4308
			feat_mask |= L2CAP_FEAT_EXT_FLOW
4309
				| L2CAP_FEAT_EXT_WINDOW;
4310

4311
		put_unaligned_le32(feat_mask, rsp->data);
4312 4313
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
4314 4315 4316
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4317

4318
		if (conn->hs_enabled)
4319 4320 4321 4322
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

4323 4324
		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4325
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4326 4327
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
4328 4329 4330
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
4331
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4332 4333
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
4334
	}
L
Linus Torvalds 已提交
4335 4336 4337 4338

	return 0;
}

4339
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4340 4341
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
L
Linus Torvalds 已提交
4342 4343 4344 4345
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

4346
	if (cmd_len < sizeof(*rsp))
4347 4348
		return -EPROTO;

L
Linus Torvalds 已提交
4349 4350 4351 4352 4353
	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

4354 4355
	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
4356
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4357 4358
		return 0;

4359
	cancel_delayed_work(&conn->info_timer);
4360

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

4370 4371
	switch (type) {
	case L2CAP_IT_FEAT_MASK:
4372
		conn->feat_mask = get_unaligned_le32(rsp->data);
4373

4374
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4375
			struct l2cap_info_req req;
4376
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4377 4378 4379 4380

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
4381
				       L2CAP_INFO_REQ, sizeof(req), &req);
4382 4383 4384 4385 4386 4387
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
4388 4389 4390 4391
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
4392
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4393
		conn->info_ident = 0;
4394 4395

		l2cap_conn_start(conn);
4396
		break;
4397
	}
4398

L
Linus Torvalds 已提交
4399 4400 4401
	return 0;
}

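/* Handle a Create Channel Request (high speed channels).  An amp_id of
 * zero means a plain BR/EDR channel; otherwise the referenced AMP
 * controller is validated and the new channel is tied to its physical
 * link.
 */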
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
4405 4406
{
	struct l2cap_create_chan_req *req = data;
4407
	struct l2cap_create_chan_rsp rsp;
4408
	struct l2cap_chan *chan;
4409
	struct hci_dev *hdev;
4410 4411 4412 4413 4414
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

4415
	if (!conn->hs_enabled)
4416 4417 4418 4419 4420
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

4421
	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4422

4423
	/* For controller id 0 make BR/EDR connection */
4424
	if (req->amp_id == AMP_ID_BREDR) {
4425 4426 4427 4428
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}
4429

4430 4431 4432 4433
	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;
4434

4435 4436 4437 4438
	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}
4439

4440 4441 4442 4443 4444
	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;
4445

4446 4447
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
4448 4449
		if (!hs_hcon) {
			hci_dev_put(hdev);
4450 4451 4452
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
4453 4454
		}

4455 4456 4457 4458
		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
4459
		chan->fcs = L2CAP_FCS_NONE;
4460
		conn->mtu = hdev->block_mtu;
4461
	}
4462

4463
	hci_dev_put(hdev);
4464 4465

	return 0;
4466 4467 4468 4469 4470 4471 4472 4473 4474 4475

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

4476
	return 0;
4477 4478
}

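/* AMP channel move helpers.  Each sends one of the Move Channel
 * signaling PDUs; the ones that expect a reply also (re)arm the move
 * timer.
 */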
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

4498
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4499 4500 4501
{
	struct l2cap_move_chan_rsp rsp;

4502
	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4503

4504
	rsp.icid = cpu_to_le16(chan->dcid);
4505 4506
	rsp.result = cpu_to_le16(result);

4507 4508
	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
4509 4510
}

static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4512 4513 4514
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4516

	chan->ident = l2cap_get_ident(chan->conn);
4518

	cfm.icid = cpu_to_le16(chan->scid);
4520 4521
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
4539 4540 4541
}

static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4542
					 u16 icid)
4543 4544 4545
{
	struct l2cap_move_chan_cfm_rsp rsp;

4546
	BT_DBG("icid 0x%4.4x", icid);
4547 4548 4549 4550 4551

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}

static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}

static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
4565
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}

static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

4596
	chan->hs_hchan = hchan;
4597 4598
	chan->hs_hcon->l2cap_data = chan->conn;

4599
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4600 4601

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4602
		int err;
4603 4604 4605 4606 4607

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
4608
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}

static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}

/* Call with chan locked */
4649 4650
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
4652 4653 4654 4655 4656 4657 4658 4659 4660 4661
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
4662
		if (chan->local_amp_id != AMP_ID_BREDR)
4663 4664 4665 4666
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}

4669 4670 4671 4672
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

4673
	if (chan->local_amp_id == AMP_ID_BREDR) {
4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}

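/* Finish channel creation once the AMP physical link attempt has
 * completed: continue on the AMP or fall back to BR/EDR for outgoing
 * channels, and answer a pending incoming Create Channel Request.
 */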
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
4691 4692 4693
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

4694 4695
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
4718 4719
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4720 4721
		} else {
			/* Send negative response */
4722 4723
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4724 4725 4726 4727 4728 4729
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
4730
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}

static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}

static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}

static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}

4794 4795
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4796
{
4797
	u8 local_amp_id = chan->local_amp_id;
4798
	u8 remote_amp_id = chan->remote_amp_id;
4799

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}

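/* Handle a Move Channel Request.  Only dynamic ERTM or streaming
 * channels may be moved; a move collision is won by the side with the
 * larger bd_addr, and the outcome is reported in a Move Channel
 * Response.
 */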
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4829 4830
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
4831 4832
{
	struct l2cap_move_chan_req *req = data;
4833
	struct l2cap_move_chan_rsp rsp;
4834
	struct l2cap_chan *chan;
4835 4836 4837 4838 4839 4840 4841 4842
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

4843
	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4844

4845
	if (!conn->hs_enabled)
4846 4847
		return -EINVAL;

4848 4849
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
4850 4851 4852 4853
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
4854 4855 4856
		return 0;
	}

4857 4858
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

4872
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4892
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

4902
	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
4919
	l2cap_send_move_chan_rsp(chan, result);
4920

4921 4922
	l2cap_chan_unlock(chan);

4923 4924 4925
	return 0;
}

static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}

static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}

static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

5058
	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5059

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);
5064 5065 5066 5067

	return 0;
}

5068 5069 5070
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
5071 5072
{
	struct l2cap_move_chan_cfm *cfm = data;
5073
	struct l2cap_chan *chan;
5074 5075 5076 5077 5078 5079 5080 5081
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

5082
	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5083

5084 5085 5086 5087 5088 5089 5090 5091 5092 5093
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
5094
			if (chan->local_amp_id == AMP_ID_BREDR)
5095 5096 5097 5098 5099 5100 5101 5102
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

5103 5104
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

5105 5106
	l2cap_chan_unlock(chan);

5107 5108 5109 5110
	return 0;
}

static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5111 5112
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
5113 5114
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
5115
	struct l2cap_chan *chan;
5116 5117 5118 5119 5120 5121 5122
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

5123
	BT_DBG("icid 0x%4.4x", icid);
5124

5125 5126 5127 5128 5129 5130 5131 5132 5133
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

5134
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5135 5136 5137 5138 5139 5140 5141
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

5142 5143 5144
	return 0;
}

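/* Sanity check LE connection parameters.  Units: connection interval in
 * 1.25 ms steps (6..3200 = 7.5 ms..4 s), supervision timeout in 10 ms
 * steps (10..3200 = 100 ms..32 s), slave latency in connection events
 * (at most 499).  The timeout must also cover (latency + 1) * max
 * intervals, which is what the max_latency computation enforces: e.g.
 * max = 40 (50 ms) with to_multiplier = 100 (1 s) allows a latency of
 * up to 19.
 */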
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5146
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}

static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5167 5168
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
5169 5170 5171 5172 5173
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
5174
	int err;
5175 5176 5177 5178 5179 5180 5181 5182 5183

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
5184 5185
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
5186 5187 5188 5189
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5190
	       min, max, latency, to_multiplier);
5191 5192

	memset(&rsp, 0, sizeof(rsp));
5193 5194 5195

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
5196
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5197
	else
5198
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5199 5200

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5201
		       sizeof(rsp), &rsp);
5202

5203 5204 5205
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

5206 5207 5208
	return 0;
}

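/* Dispatch one BR/EDR signaling command to its handler.  Unknown opcodes
 * are reported back to the caller, which replies with a Command Reject.
 */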
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5210 5211
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
5212 5213 5214 5215 5216
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
5217
		l2cap_command_rej(conn, cmd, cmd_len, data);
5218 5219 5220
		break;

	case L2CAP_CONN_REQ:
5221
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5222 5223 5224
		break;

	case L2CAP_CONN_RSP:
5225
	case L2CAP_CREATE_CHAN_RSP:
5226
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5227 5228 5229 5230 5231 5232 5233
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
5234
		l2cap_config_rsp(conn, cmd, cmd_len, data);
5235 5236 5237
		break;

	case L2CAP_DISCONN_REQ:
5238
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5239 5240 5241
		break;

	case L2CAP_DISCONN_RSP:
5242
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5243 5244 5245 5246 5247 5248 5249 5250 5251 5252
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
5253
		err = l2cap_information_req(conn, cmd, cmd_len, data);
5254 5255 5256
		break;

	case L2CAP_INFO_RSP:
5257
		l2cap_information_rsp(conn, cmd, cmd_len, data);
5258 5259
		break;

5260 5261 5262 5263
	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

5264 5265 5266 5267 5268
	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
5269
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5270 5271 5272 5273 5274 5275 5276
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
5277
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5278 5279
		break;

5280 5281 5282 5283 5284 5285 5286 5287 5288 5289
	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5290
				   struct l2cap_cmd_hdr *cmd, u8 *data)
5291 5292 5293 5294 5295 5296
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
5297
		return l2cap_conn_param_update_req(conn, cmd, data);
5298 5299 5300 5301 5302 5303 5304 5305 5306 5307

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}

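/* Parse the LE signaling channel.  An LE C-frame carries exactly one
 * command; frames on the wrong link type or with a bad header are
 * dropped, and handler failures are answered with a Command Reject.
 */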
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
5311
	struct hci_conn *hcon = conn->hcon;
5312 5313
	struct l2cap_cmd_hdr *cmd;
	u16 len;
5314 5315
	int err;

5316
	if (hcon->type != LE_LINK)
5317
		goto drop;
5318

5319 5320
	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;
5321

5322 5323
	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5324

5325
	len = le16_to_cpu(cmd->len);
5326

5327
	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5328

5329 5330 5331 5332
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}
5333

5334 5335 5336
	err = l2cap_le_sig_cmd(conn, cmd, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;
5337

5338
		BT_ERR("Wrong link type (%d)", err);
5339

5340
		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5341 5342
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
5343 5344
	}

5345
drop:
5346 5347 5348
	kfree_skb(skb);
}

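/* Parse the BR/EDR signaling channel.  One C-frame may carry several
 * commands, each with its own header; a truncated command aborts
 * parsing, and handler failures are answered with a Command Reject.
 */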
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5350
				     struct sk_buff *skb)
{
5352
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
5356
	int err;

	l2cap_raw_recv(conn, skb);

5360
	if (hcon->type != ACL_LINK)
5361
		goto drop;
5362

	while (len >= L2CAP_CMD_HDR_SIZE) {
5364
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

5369
		cmd_len = le16_to_cpu(cmd.len);

5371 5372
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

5374
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

5379
		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
5381
			struct l2cap_cmd_rej_unk rej;
5382 5383

			BT_ERR("Wrong link type (%d)", err);

5385
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5386 5387
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

5390 5391
		data += cmd_len;
		len  -= cmd_len;
	}

5394
drop:
	kfree_skb(skb);
}

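/* Verify the CRC16 frame check sequence of an incoming ERTM/streaming
 * frame, when FCS is in use.  The FCS covers the basic L2CAP header,
 * the (enhanced or extended) control field and the payload.
 */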
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5399 5400
{
	u16 our_fcs, rcv_fcs;
5401 5402 5403 5404 5405 5406
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;
5407

5408
	if (chan->fcs == L2CAP_FCS_CRC16) {
5409
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5410 5411 5412 5413
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
5414
			return -EBADMSG;
5415 5416 5417 5418
	}
	return 0;
}

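/* Send a frame carrying the F-bit in reply to a poll: an RNR while the
 * local side is busy, otherwise pending I-frames, falling back to an RR
 * if nothing else carried the F-bit.
 */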
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5420
{
5421
	struct l2cap_ctrl control;
5422

5423
	BT_DBG("chan %p", chan);
5424

5425 5426 5427 5428 5429
	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5430

5431
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5432 5433
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
5434 5435
	}

5436 5437 5438
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);
5439

5440
	/* Send pending iframes */
5441
	l2cap_ertm_send(chan);
5442

5443
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5444 5445 5446 5447 5448 5449
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
5450 5451 5452
	}
}

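/* Chain a newly received fragment onto the SDU being reassembled and
 * keep the parent skb's length accounting (len, data_len, truesize) in
 * sync.
 */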
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
5455
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}

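/* Reassemble an SDU from its SAR fragments.  Unsegmented frames are
 * delivered directly; START/CONTINUE/END fragments are chained onto
 * chan->sdu until the announced SDU length has been received, then the
 * whole SDU is handed to the channel's recv callback.  Any violation
 * drops the partial SDU.
 */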
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
5474 5475
{
	int err = -EINVAL;
5476

5477
	switch (control->sar) {
5478
	case L2CAP_SAR_UNSEGMENTED:
5479 5480
		if (chan->sdu)
			break;
5481

5482
		err = chan->ops->recv(chan, skb);
5483
		break;
5484

5485
	case L2CAP_SAR_START:
5486 5487
		if (chan->sdu)
			break;
5488

5489
		chan->sdu_len = get_unaligned_le16(skb->data);
5490
		skb_pull(skb, L2CAP_SDULEN_SIZE);
5491

5492 5493 5494 5495
		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}
5496

5497 5498
		if (skb->len >= chan->sdu_len)
			break;
5499

5500 5501
		chan->sdu = skb;
		chan->sdu_last_frag = skb;
5502

5503 5504
		skb = NULL;
		err = 0;
5505 5506
		break;

5507
	case L2CAP_SAR_CONTINUE:
5508
		if (!chan->sdu)
5509
			break;
5510

5511 5512 5513
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;
5514

5515 5516
		if (chan->sdu->len >= chan->sdu_len)
			break;
5517

5518
		err = 0;
5519 5520
		break;

5521
	case L2CAP_SAR_END:
5522
		if (!chan->sdu)
5523
			break;
5524

5525 5526 5527
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;
5528

5529 5530
		if (chan->sdu->len != chan->sdu_len)
			break;
5531

5532
		err = chan->ops->recv(chan, chan->sdu);
5533

5534 5535 5536 5537 5538
		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
5539
		}
5540 5541 5542
		break;
	}

5543 5544 5545 5546 5547 5548 5549
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}
5550

5551
	return err;
5552 5553
}

5554 5555 5556 5557 5558 5559
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

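/* Tell the ERTM state machine that the local receiver has entered or
 * left the busy condition; a no-op for other channel modes.
 */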
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5561
{
5562
	u8 event;
5563

5564 5565
	if (chan->mode != L2CAP_MODE_ERTM)
		return;
5566

5567
	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5568
	l2cap_tx(chan, NULL, NULL, event);
5569 5570
}

5571 5572
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
5603 5604 5605 5606 5607
}

static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
5608 5609 5610 5611 5612 5613
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5614
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5628
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
5661 5662 5663 5664 5665
}

static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5672
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5681
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
5698 5699
}

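/* Classify the TxSeq of a received I-frame relative to the receive
 * window: expected, duplicate, unexpected (missing frames), a reply to
 * an outstanding SREJ, or invalid.  Invalid sequence numbers may only be
 * silently ignored when the tx window is at most half the sequence
 * space (see the "double poll" note below).
 */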
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5709
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5750
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}

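/* RECV-state handler of the ERTM receive state machine: deliver
 * in-sequence I-frames, start SREJ recovery when a sequence gap is
 * detected, and handle RR/RNR S-frames by retransmitting or resuming
 * transmission as required.
 */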
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

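/* ERTM receive state machine, SREJ_SENT state: out-of-order I-frames are
 * parked on srej_q while the SREJ'd retransmissions are still outstanding.
 */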
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

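/* Wrap up a channel move: go back to the RECV state, adopt the MTU of
 * the new link (AMP block MTU when a high-speed link is in use, the ACL
 * MTU otherwise) and resegment pending outbound data accordingly.
 */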
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}

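/* WAIT_P state, entered while a channel move is being finalized: only a
 * frame with the poll bit set is acceptable here.  Rewind the transmit
 * state to the remote's reqseq and answer with the final bit set.
 */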
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}

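/* WAIT_F state, the counterpart of WAIT_P: wait for a frame with the
 * final bit set, rewind the transmit state, pick up the new link MTU
 * and resegment before resuming normal reception.
 */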
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}

static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}

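/* Entry point for ERTM reception: reject frames whose reqseq does not
 * acknowledge something actually outstanding, then dispatch to the
 * handler for the current RX state.
 */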
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}

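/* Streaming mode reception: only the expected txseq is reassembled;
 * any other frame discards the partial SDU, since streaming mode has
 * no retransmission.
 */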
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}

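/* Common receive path for ERTM and streaming channels: unpack the
 * control field, verify FCS and payload length, then feed I-frames and
 * S-frames into the appropriate state machine.
 */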
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/* We can just drop a corrupted I-frame here: the receive side
	 * will notice the missing sequence number and ask for
	 * retransmission through the normal recovery procedures.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

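/* Deliver a frame received on a dynamically allocated CID to its
 * channel.  A2MP frames may create their channel on demand; frames for
 * unknown CIDs are dropped.
 */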
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If the socket recv buffer overflows we drop data here,
		 * which is *bad* because L2CAP is supposed to be reliable.
		 * But we don't have any other choice: L2CAP doesn't
		 * provide a flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}

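/* Deliver a connectionless packet to a channel bound to the given PSM
 * on a BR/EDR link, recording the remote address and PSM so they can be
 * reported in msg_name.
 */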
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}

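/* Deliver an ATT fixed-channel packet received over LE, dropping
 * traffic from blacklisted remote addresses.
 */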
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}

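/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * ATT, LE signalling and SMP go to their fixed-channel handlers, anything
 * else is treated as data on a dynamic channel.
 */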
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* ---- L2CAP interface with lower layer (HCI) ---- */

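/* HCI callback for an incoming BR/EDR connection request: scan listening
 * channels bound to this adapter (or to BDADDR_ANY) and report whether to
 * accept the connection and with which link mode bits.
 */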
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (!status) {
		conn = l2cap_conn_add(hcon);
		if (conn)
			l2cap_conn_ready(conn);
	} else {
		l2cap_conn_del(hcon, bt_to_errno(status));
	}
}

int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

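/* React to an encryption change on a connection-oriented channel: losing
 * encryption arms a timer (medium security) or closes the channel (high
 * security); regaining it clears the timer.
 */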
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

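/* HCI security/encryption change callback: walk every channel on the
 * connection and resume it, kick off a pending connect, or answer a
 * connection request that was waiting on the security procedure.
 */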
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}

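/* HCI ACL data callback: reassemble L2CAP frames that span multiple ACL
 * packets (ACL_START/ACL_CONT) and hand each complete frame to
 * l2cap_recv_frame().
 */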
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* A start fragment always begins with the basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the
			 * connection's rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;

			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}

static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, &c->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");