/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535
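/* Note: the credit count carried in LE credit-based flow control signalling
 * is a 16-bit field, so 65535 is the largest number of credits a channel can
 * ever be granted (hence the cap above).
 */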

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

138 139
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
140
{
141
	struct l2cap_chan *c;
142

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
146
	}
147
	return NULL;
148 149
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

164
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165
{
166
	struct l2cap_chan *c;
167

168
	list_for_each_entry(c, &chan_list, global_l) {
169
		if (c->sport == psm && !bacmp(&c->src, src))
170
			return c;
171
	}
172
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
177 178
	int err;

179
	write_lock(&chan_list_lock);
180

181
	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 183
		err = -EADDRINUSE;
		goto done;
184 185
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
195
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}
202

203
done:
204
	write_unlock(&chan_list_lock);
205
	return err;
206
}
207
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
211
	write_lock(&chan_list_lock);

	chan->scid = scid;

215
	write_unlock(&chan_list_lock);

	return 0;
}

220
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221
{
222
	u16 cid, dyn_end;
223

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

237
static void l2cap_state_change(struct l2cap_chan *chan, int state)
238
{
239
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240
	       state_to_string(state));
241

242
	chan->state = state;
243
	chan->ops->state_change(chan, state, 0);
244 245
}

246 247
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
248
{
249
	chan->state = state;
250
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
255
	chan->ops->state_change(chan, chan->state, err);
256 257
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
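/* For illustration: with a mask of 0x07, appending seq 5 and then seq 12 to
 * an empty list sets head = 5, tail = 12, list[5] = 12 and
 * list[12 & 0x07] = L2CAP_SEQ_LIST_TAIL; a subsequent pop returns 5 and
 * advances head to 12.
 */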

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
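	/* For example, a negotiated transmit window of 10 is rounded up to
	 * 16 slots, giving a mask of 0x000f.
	 */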
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

335
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
336
{
337
	u16 seq = seq_list->head;
338 339
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 347
	}

348
	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
353
	u16 i;
354

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

371 372
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;
373

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
381 382
}

383
static void l2cap_chan_timeout(struct work_struct *work)
384
{
385
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
386
					       chan_timer.work);
387
	struct l2cap_conn *conn = chan->conn;
388 389
	int reason;

390
	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
391

392
	mutex_lock(&conn->chan_lock);
393
	l2cap_chan_lock(chan);
394

395
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396
		reason = ECONNREFUSED;
397
	else if (chan->state == BT_CONNECT &&
398
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

403
	l2cap_chan_close(chan, reason);
404

405
	l2cap_chan_unlock(chan);
406

407
	chan->ops->close(chan);
408 409
	mutex_unlock(&conn->chan_lock);

410
	l2cap_chan_put(chan);
411 412
}

413
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

421 422
	mutex_init(&chan->lock);

423
	write_lock(&chan_list_lock);
424
	list_add(&chan->global_l, &chan_list);
425
	write_unlock(&chan_list_lock);
426

427
	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428

429 430
	chan->state = BT_OPEN;

431
	kref_init(&chan->kref);
432

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

436
	BT_DBG("chan %p", chan);
437

438 439
	return chan;
}
440
EXPORT_SYMBOL_GPL(l2cap_chan_create);
441

442
static void l2cap_chan_destroy(struct kref *kref)
443
{
444 445
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

446 447
	BT_DBG("chan %p", chan);

448
	write_lock(&chan_list_lock);
449
	list_del(&chan->global_l);
450
	write_unlock(&chan_list_lock);
451

452
	kfree(chan);
453 454
}

455 456
void l2cap_chan_hold(struct l2cap_chan *c)
{
457
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
458

459
	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
464
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465

466
	kref_put(&c->kref, l2cap_chan_destroy);
467
}
468
EXPORT_SYMBOL_GPL(l2cap_chan_put);
469

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
476 477
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
478
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
479
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
487
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
488

489
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
490
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
494
	chan->tx_credits = 0;
495
	chan->rx_credits = le_max_credits;
496
	chan->mps = min_t(u16, chan->imtu, le_default_mps);
497 498

	skb_queue_head_init(&chan->tx_q);
499 500
}

501
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
502
{
503
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
504
	       __le16_to_cpu(chan->psm), chan->dcid);
505

506
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
507

508
	chan->conn = conn;
509

510 511
	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
515
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
519
		/* Connectionless socket */
520 521
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
522
		chan->omtu = L2CAP_DEFAULT_MTU;
523 524
		break;

525 526
	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
527 528
		break;

529
	default:
530
		/* Raw socket can send/recv signalling messages only */
531 532
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
533
		chan->omtu = L2CAP_DEFAULT_MTU;
534 535
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
541
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
542

543
	l2cap_chan_hold(chan);
544

545 546
	hci_conn_hold(conn->hcon);

547
	list_add(&chan->list, &conn->chan_l);
548 549
}

550
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
554
	mutex_unlock(&conn->chan_lock);
555 556
}

557
void l2cap_chan_del(struct l2cap_chan *chan, int err)
558
{
559
	struct l2cap_conn *conn = chan->conn;
560

561
	__clear_chan_timer(chan);
562

563
	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564

565
	if (conn) {
566
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
567
		/* Delete from channel list */
568
		list_del(&chan->list);
569

570
		l2cap_chan_put(chan);
571

572
		chan->conn = NULL;
573

574
		if (chan->scid != L2CAP_CID_A2MP)
575
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
579 580
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

588
	chan->ops->teardown(chan, err);
589

590
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
591
		return;
592

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
596

597
	case L2CAP_MODE_LE_FLOWCTL:
598
		skb_queue_purge(&chan->tx_q);
599 600
		break;

601
	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
605

606
		skb_queue_purge(&chan->srej_q);
607

608 609
		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
616
	}
617 618

	return;
619
}
620
EXPORT_SYMBOL_GPL(l2cap_chan_del);
621

void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
654
	rsp.mps     = cpu_to_le16(chan->mps);
655
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
678
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

683
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

687
	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
688

689
	switch (chan->state) {
690
	case BT_LISTEN:
691
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
696
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
697
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
698
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
707 708
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
720
		chan->ops->teardown(chan, 0);
		break;
	}
}
724
EXPORT_SYMBOL(l2cap_chan_close);
725

726
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
727
{
728 729
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
730
		switch (chan->sec_level) {
731
		case BT_SECURITY_HIGH:
732
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
739
		break;
740
	case L2CAP_CHAN_CONN_LESS:
741
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
745 746
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
751
	case L2CAP_CHAN_CONN_ORIENTED:
752
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
753 754
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
755

756 757
			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
764
		switch (chan->sec_level) {
765
		case BT_SECURITY_HIGH:
766
		case BT_SECURITY_FIPS:
767
			return HCI_AT_GENERAL_BONDING_MITM;
768
		case BT_SECURITY_MEDIUM:
769
			return HCI_AT_GENERAL_BONDING;
770
		default:
771
			return HCI_AT_NO_BONDING;
772
		}
773
		break;
774
	}
}

/* Service level security */
778
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
779
{
780
	struct l2cap_conn *conn = chan->conn;
781 782
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

786
	auth_type = l2cap_get_auth_type(chan);
787

788 789
	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
790 791
}

792
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by the kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

802
	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

809
	mutex_unlock(&conn->ident_lock);

	return id;
}

814 815
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
816 817
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
818
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
823
		return;
824

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

830
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
831
	skb->priority = HCI_PRIO_MAX;
832

	hci_send_acl(conn->hchan, skb, flags);
}

static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
848
	       skb->priority);
849

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

859
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
860
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;
864

865 866
	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
867 868
}

static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
923
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
927
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

990 991
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
995
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

1000
	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1001 1002

	if (!skb)
1003
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1013 1014

	if (chan->fcs == L2CAP_FCS_CRC16) {
1015
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
1062 1063
}

1064
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1065
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;
1078

1079 1080
	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
1081 1082
}

1083
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1084
{
1085
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1086 1087
}

1088
static bool __amp_capable(struct l2cap_chan *chan)
1089
{
1090
	struct l2cap_conn *conn = chan->conn;
1091
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;
1099 1100

	read_lock(&hci_dev_list_lock);
1101
	list_for_each_entry(hdev, &hci_dev_list, list) {
1102
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
1108 1109
	read_unlock(&hci_dev_list_lock);

1110 1111
	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;
1112 1113

	return false;
1114 1115
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

1122
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

1207 1208
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
1209
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

1213 1214
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);
1215

1216
	chan->state = BT_CONNECTED;
1217

1218
	chan->ops->ready(chan);
1219 1220
}

static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
1232
	req.mps     = cpu_to_le16(chan->mps);
1233
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}

static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
1262 1263
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

1269
static void l2cap_do_start(struct l2cap_chan *chan)
1270
{
1271
	struct l2cap_conn *conn = chan->conn;
1272

1273
	if (conn->hcon->type == LE_LINK) {
1274
		l2cap_le_start(chan);
		return;
	}

1278
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

1282
		if (l2cap_chan_check_security(chan, true) &&
1283
		    __l2cap_no_conn_pending(chan)) {
1284 1285
			l2cap_start_connection(chan);
		}
1286 1287
	} else {
		struct l2cap_info_req req;
1288
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

1293
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1294

1295 1296
		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}

static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
1303
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}

1316
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1317
{
1318
	struct l2cap_conn *conn = chan->conn;
1319 1320
	struct l2cap_disconn_req req;

	if (!conn)
		return;

1324
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
1328 1329
	}

1330
	if (chan->scid == L2CAP_CID_A2MP) {
1331
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

1335 1336
	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
1337 1338
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);
1339

1340
	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1341 1342
}

/* ---- L2CAP connections ---- */
1344 1345
static void l2cap_conn_start(struct l2cap_conn *conn)
{
1346
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

1350
	mutex_lock(&conn->chan_lock);
1351

1352
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1353
		l2cap_chan_lock(chan);
1354

1355
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1356
			l2cap_chan_unlock(chan);
			continue;
		}

1360
		if (chan->state == BT_CONNECT) {
1361
			if (!l2cap_chan_check_security(chan, true) ||
1362
			    !__l2cap_no_conn_pending(chan)) {
1363
				l2cap_chan_unlock(chan);
1364 1365
				continue;
			}
1366

1367
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1368
			    && test_bit(CONF_STATE2_DEVICE,
1369
					&chan->conf_state)) {
1370
				l2cap_chan_close(chan, ECONNRESET);
1371
				l2cap_chan_unlock(chan);
1372
				continue;
1373
			}
1374

1375
			l2cap_start_connection(chan);
1376

1377
		} else if (chan->state == BT_CONNECT2) {
1378
			struct l2cap_conn_rsp rsp;
1379
			char buf[128];
1380 1381
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
1382

1383
			if (l2cap_chan_check_security(chan, false)) {
1384
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1385 1386
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1387
					chan->ops->defer(chan);
1388 1389

				} else {
1390
					l2cap_state_change(chan, BT_CONFIG);
1391 1392
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1393
				}
1394
			} else {
1395 1396
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1397 1398
			}

1399
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1400
				       sizeof(rsp), &rsp);
1401

1402
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1403
			    rsp.result != L2CAP_CR_SUCCESS) {
1404
				l2cap_chan_unlock(chan);
				continue;
			}

1408
			set_bit(CONF_REQ_SENT, &chan->conf_state);
1409
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1410
				       l2cap_build_conf_req(chan, buf), buf);
1411
			chan->num_conf_req++;
1412 1413
		}

1414
		l2cap_chan_unlock(chan);
1415 1416
	}

1417
	mutex_unlock(&conn->chan_lock);
1418 1419
}

1420
/* Find socket with cid and source/destination bdaddr.
1421 1422
 * Returns closest match, locked.
 */
1423
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1424 1425
						    bdaddr_t *src,
						    bdaddr_t *dst)
1426
{
1427
	struct l2cap_chan *c, *c1 = NULL;
1428

1429
	read_lock(&chan_list_lock);
1430

1431
	list_for_each_entry(c, &chan_list, global_l) {
1432
		if (state && c->state != state)
1433 1434
			continue;

1435
		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

1439
			/* Exact match. */
1440 1441
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
1442
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}
1446 1447

			/* Closest match */
1448 1449
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1450 1451
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
1452
				c1 = c;
1453 1454
		}
	}
1455

1456
	read_unlock(&chan_list_lock);
1457

1458
	return c1;
}

static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
1463
	struct hci_conn *hcon = conn->hcon;
1464
	struct hci_dev *hdev = hcon->hdev;
1465
	struct l2cap_chan *chan, *pchan;
1466
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
1471
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1472
					  &hcon->src, &hcon->dst);
1473
	if (!pchan)
1474 1475
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
1483
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
1484 1485
		return;

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
1491
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}

1505
	l2cap_chan_lock(pchan);
1506

1507
	chan = pchan->ops->new_connection(pchan);
1508
	if (!chan)
1509 1510
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;
1515

1516
	__l2cap_chan_add(conn, chan);
1517

1518
clean:
1519
	l2cap_chan_unlock(pchan);
1520 1521
}

1522 1523
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
1524
	struct l2cap_chan *chan;
1525
	struct hci_conn *hcon = conn->hcon;
1526

1527
	BT_DBG("conn %p", conn);
1528

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
1532 1533
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);
1534

1535
	mutex_lock(&conn->chan_lock);
1536

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

1540
	list_for_each_entry(chan, &conn->chan_l, list) {
1541

1542
		l2cap_chan_lock(chan);
1543

1544
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

1549
		if (hcon->type == LE_LINK) {
1550
			l2cap_le_start(chan);
1551
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1552
			l2cap_chan_ready(chan);
1553

1554
		} else if (chan->state == BT_CONNECT) {
1555
			l2cap_do_start(chan);
1556
		}
1557

1558
		l2cap_chan_unlock(chan);
1559
	}
1560

1561
	mutex_unlock(&conn->chan_lock);
1562 1563

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
1569
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

1573
	mutex_lock(&conn->chan_lock);
1574

1575
	list_for_each_entry(chan, &conn->chan_l, list) {
1576
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1577
			l2cap_chan_set_err(chan, err);
1578 1579
	}

1580
	mutex_unlock(&conn->chan_lock);
1581 1582
}

1583
static void l2cap_info_timeout(struct work_struct *work)
1584
{
1585
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1586
					       info_timer.work);
1587

1588
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1589
	conn->info_ident = 0;
1590

	l2cap_conn_start(conn);
}

/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
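/*
 * Illustrative usage from an external module (my_probe() and my_remove() are
 * hypothetical callbacks; only the l2cap_user callbacks used above and the
 * l2cap_conn_get()/l2cap_conn_put() helpers defined later in this file are
 * assumed):
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	if (err)
 *		l2cap_conn_put(conn);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */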

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);

static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}

static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

1689
	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);
1697

1698 1699
	l2cap_unregister_all_users(conn);

1700 1701
	mutex_lock(&conn->chan_lock);

1702 1703
	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1704
		l2cap_chan_hold(chan);
1705 1706
		l2cap_chan_lock(chan);

1707
		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

1711
		chan->ops->close(chan);
1712
		l2cap_chan_put(chan);
1713 1714
	}

1715 1716
	mutex_unlock(&conn->chan_lock);

1717 1718
	hci_chan_del(conn->hchan);

1719
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1720
		cancel_delayed_work_sync(&conn->info_timer);
1721

1722
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1723
		cancel_delayed_work_sync(&conn->security_timer);
1724
		smp_chan_destroy(conn);
1725
	}
1726 1727

	hcon->l2cap_data = NULL;
1728 1729
	conn->hchan = NULL;
	l2cap_conn_put(conn);
1730 1731
}

1732
static void security_timeout(struct work_struct *work)
1733
{
1734
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1735
					       security_timer.work);
1736

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
1743 1744
}

static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);

/* ---- Socket interface ---- */

1767
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
1770 1771
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
1772 1773
						   bdaddr_t *dst,
						   u8 link_type)
{
1775
	struct l2cap_chan *c, *c1 = NULL;

1777
	read_lock(&chan_list_lock);
1778

1779
	list_for_each_entry(c, &chan_list, global_l) {
1780
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

1789
		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
1794 1795
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
1796
			if (src_match && dst_match) {
1797
				read_unlock(&chan_list_lock);
1798 1799
				return c;
			}

			/* Closest match */
1802 1803
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1804 1805
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
1806
				c1 = c;
		}
	}

1810
	read_unlock(&chan_list_lock);
1811

1812
	return c1;
}

1815
static void l2cap_monitor_timeout(struct work_struct *work)
1816
{
1817
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1818
					       monitor_timer.work);
1819

1820
	BT_DBG("chan %p", chan);
1821

1822 1823
	l2cap_chan_lock(chan);

1824
	if (!chan->conn) {
1825
		l2cap_chan_unlock(chan);
1826
		l2cap_chan_put(chan);
		return;
	}

1830
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1831

1832
	l2cap_chan_unlock(chan);
1833
	l2cap_chan_put(chan);
1834 1835
}

1836
static void l2cap_retrans_timeout(struct work_struct *work)
1837
{
1838
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1839
					       retrans_timer.work);
1840

1841
	BT_DBG("chan %p", chan);
1842

1843 1844
	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}
1850

1851
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1852
	l2cap_chan_unlock(chan);
1853
	l2cap_chan_put(chan);
1854 1855
}

1856 1857
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
1858
{
1859
	struct sk_buff *skb;
1860
	struct l2cap_ctrl *control;
1861

1862 1863
	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);
1880

1881
		if (chan->fcs == L2CAP_FCS_CRC16) {
1882 1883
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1884 1885
		}

1886
		l2cap_do_send(chan, skb);
1887

1888
		BT_DBG("Sent txseq %u", control->txseq);
1889

1890
		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1891
		chan->frames_sent++;
	}
}

1895
static int l2cap_ertm_send(struct l2cap_chan *chan)
1896 1897
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);
1902

1903
	if (chan->state != BT_CONNECTED)
1904
		return -ENOTCONN;
1905

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1915

1916
		skb = chan->tx_send_head;
1917

1918 1919
		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;
1920

1921
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1922
			control->final = 1;
1923

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;
1927

1928
		__pack_control(chan, control, skb);
1929

1930
		if (chan->fcs == L2CAP_FCS_CRC16) {
1931 1932
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1933 1934
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);
1939

1940 1941
		if (!tx_skb)
			break;
1942

1943
		__set_retrans_timer(chan);
1944 1945

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1946
		chan->unacked_frames++;
1947
		chan->frames_sent++;
1948
		sent++;
1949

1950 1951
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
1952
		else
1953
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1954 1955

		l2cap_do_send(chan, tx_skb);
1956
		BT_DBG("Sent txseq %u", control->txseq);
1957 1958
	}

1959 1960
	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1961 1962

	return sent;
1963 1964
}

static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}

static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}

static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;
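		/* Illustrative arithmetic: for an ack_win of 10 the
		 * threshold works out to (10 + 20) >> 2 = 7, i.e. roughly
		 * three quarters of the receive window.
		 */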

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}

static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len  -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}

static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}

static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}
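	/* Worked example with assumed numbers: for remote_mps = 300 and a
	 * 1000 byte SDU, the loop below emits SAR_START (298 bytes of data
	 * plus the 2 byte SDU length), SAR_CONTINUE (300), SAR_CONTINUE (300)
	 * and SAR_END (102).
	 */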

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}

static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;
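	/* Only the first PDU of an LE SDU carries the 2 byte SDU length
	 * field, so pdu_len is reduced here and grown back by
	 * L2CAP_SDULEN_SIZE once that first PDU has been queued below.
	 */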

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}

int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);

static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}

static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}

static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}

static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}

static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}

static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}

static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}

static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}

/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
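
/* Configuration options are type/length/value triplets: a 1 byte type
 * (with the hint bit in the top bit), a 1 byte length and 'length' bytes
 * of value.  As an illustration, an MTU option carrying 672 would appear
 * on the wire as 01 02 a0 02 (type L2CAP_CONF_MTU, len 2, value in
 * little endian).
 */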

static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}

static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}

static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}

static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;
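		/* Illustrative numbers: a class 1 best-effort flush timeout
		 * of 0xffffffff usec rounds up to 4294968 msec above, and
		 * 3 * 4294968 + 500 far exceeds 16 bits, so it is clamped
		 * to 0xffff (65.535 seconds) below.
		 */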

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}

static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}

static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}

static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}

static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}

static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}

void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}

static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}

static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (e.g. a HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, in order to avoid
	 * keeping the ACL alive for too long after L2CAP disconnection, reset
	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}

static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}

static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}

static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}

static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}

static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}

static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}

static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4321
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4322 4323
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
L
Linus Torvalds 已提交
4324 4325 4326
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
4327
	struct l2cap_chan *chan;
L
Linus Torvalds 已提交
4328

4329 4330 4331
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

L
Linus Torvalds 已提交
4332 4333 4334 4335 4336
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

4337 4338 4339 4340 4341
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
L
Linus Torvalds 已提交
4342
		return 0;
4343
	}
L
Linus Torvalds 已提交
4344

4345
	l2cap_chan_lock(chan);
4346

4347
	l2cap_chan_hold(chan);
4348
	l2cap_chan_del(chan, 0);
4349 4350

	l2cap_chan_unlock(chan);
L
Linus Torvalds 已提交
4351

4352
	chan->ops->close(chan);
4353
	l2cap_chan_put(chan);
4354 4355 4356

	mutex_unlock(&conn->chan_lock);

L
Linus Torvalds 已提交
4357 4358 4359
	return 0;
}

static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}

static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}

static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}

static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}

static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}

static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}

static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}

static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}

static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

4672
	chan->hs_hchan = hchan;
4673 4674
	chan->hs_hcon->l2cap_data = chan->conn;

4675
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4676 4677

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4678
		int err;
4679 4680 4681 4682 4683

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
4684
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}

static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}

/* Call with chan locked */
4725 4726
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
4728 4729 4730 4731 4732 4733 4734 4735 4736 4737
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
4738
		if (chan->local_amp_id != AMP_ID_BREDR)
4739 4740 4741 4742
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}

4745 4746 4747 4748
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

4749
	if (chan->local_amp_id == AMP_ID_BREDR) {
4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}

4764 4765 4766
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
4767 4768 4769
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

4770 4771
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
4787 4788 4789 4790 4791 4792 4793
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
4794 4795
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4796 4797
		} else {
			/* Send negative response */
4798 4799
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4800 4801 4802 4803 4804 4805
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
4806
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}

static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}

static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}

static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}

4870 4871
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4872
{
4873
	u8 local_amp_id = chan->local_amp_id;
4874
	u8 remote_amp_id = chan->remote_amp_id;
4875

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}

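/* Handle an AMP Move Channel Request from the remote side.  The move
 * is only honoured for dynamic ERTM/streaming channels when high-speed
 * support is enabled; a move collision is resolved in favour of the
 * side with the larger bd_addr.
 */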
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4905 4906
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
4907 4908
{
	struct l2cap_move_chan_req *req = data;
4909
	struct l2cap_move_chan_rsp rsp;
4910
	struct l2cap_chan *chan;
4911 4912 4913 4914 4915 4916 4917 4918
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

4919
	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4920

4921
	if (!conn->hs_enabled)
4922 4923
		return -EINVAL;

4924 4925
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
4926
		rsp.icid = cpu_to_le16(icid);
4927
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4928 4929
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
4930 4931 4932
		return 0;
	}

4933 4934
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

4948
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4968
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4969 4970 4971 4972 4973 4974 4975 4976 4977
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

4978
	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
4995
	l2cap_send_move_chan_rsp(chan, result);
4996

4997 4998
	l2cap_chan_unlock(chan);

4999 5000 5001
	return 0;
}

static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}

static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}

static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

5134
	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5135

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);
5140 5141 5142 5143

	return 0;
}

5144 5145 5146
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
5147 5148
{
	struct l2cap_move_chan_cfm *cfm = data;
5149
	struct l2cap_chan *chan;
5150 5151 5152 5153 5154 5155 5156 5157
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

5158
	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5159

5160 5161 5162 5163 5164 5165 5166 5167 5168 5169
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
5170
			if (chan->local_amp_id == AMP_ID_BREDR)
5171 5172 5173 5174 5175 5176 5177 5178
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

5179 5180
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

5181 5182
	l2cap_chan_unlock(chan);

5183 5184 5185 5186
	return 0;
}

static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5187 5188
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
5189 5190
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
5191
	struct l2cap_chan *chan;
5192 5193 5194 5195 5196 5197 5198
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

5199
	BT_DBG("icid 0x%4.4x", icid);
5200

5201 5202 5203 5204 5205 5206 5207 5208 5209
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

5210
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5211 5212 5213 5214 5215 5216 5217
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

5218 5219 5220
	return 0;
}

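/* LE Connection Parameter Update Request: only accepted when the local
 * side is master of the connection.  The proposed parameters are
 * validated with hci_check_conn_params() before being applied and
 * reported to the management interface.
 */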
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5222
					      struct l2cap_cmd_hdr *cmd,
5223
					      u16 cmd_len, u8 *data)
5224 5225 5226 5227
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
5228
	u16 min, max, latency, to_multiplier;
5229
	int err;
5230

5231
	if (hcon->role != HCI_ROLE_MASTER)
5232 5233 5234 5235 5236 5237
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
5238 5239
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
5240 5241 5242 5243
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5244
	       min, max, latency, to_multiplier);
5245 5246

	memset(&rsp, 0, sizeof(rsp));
5247

5248
	err = hci_check_conn_params(min, max, latency, to_multiplier);
5249
	if (err)
5250
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5251
	else
5252
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5253 5254

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5255
		       sizeof(rsp), &rsp);
5256

5257
	if (!err) {
5258 5259 5260 5261
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
5262
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5263 5264
				    store_hint, min, max, latency,
				    to_multiplier);
5265 5266

	}
5267

5268 5269 5270
	return 0;
}

static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}

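/* Dispatch a single BR/EDR signaling command to its handler.  Unknown
 * opcodes are reported here and answered with a Command Reject by the
 * caller.
 */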
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5331 5332
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
5333 5334 5335 5336 5337
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
5338
		l2cap_command_rej(conn, cmd, cmd_len, data);
5339 5340 5341
		break;

	case L2CAP_CONN_REQ:
5342
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5343 5344 5345
		break;

	case L2CAP_CONN_RSP:
5346
	case L2CAP_CREATE_CHAN_RSP:
5347
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5348 5349 5350 5351 5352 5353 5354
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
5355
		l2cap_config_rsp(conn, cmd, cmd_len, data);
5356 5357 5358
		break;

	case L2CAP_DISCONN_REQ:
5359
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5360 5361 5362
		break;

	case L2CAP_DISCONN_RSP:
5363
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5364 5365 5366 5367 5368 5369 5370 5371 5372 5373
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
5374
		err = l2cap_information_req(conn, cmd, cmd_len, data);
5375 5376 5377
		break;

	case L2CAP_INFO_RSP:
5378
		l2cap_information_rsp(conn, cmd, cmd_len, data);
5379 5380
		break;

5381 5382 5383 5384
	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

5385 5386 5387 5388 5389
	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
5390
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5391 5392 5393 5394 5395 5396 5397
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
5398
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5399 5400
		break;

5401 5402 5403 5404 5405 5406 5407 5408 5409
	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

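/* Handle an incoming LE credit-based connection request: look up a
 * listening channel for the PSM, check security, and reply with the
 * local MTU, MPS and initial credit count.
 */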
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
5417
	u16 dcid, scid, credits, mtu, mps;
5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
5429
	credits = 0;
5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467

	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

5468 5469
	l2cap_le_flowctl_init(chan);

5470 5471 5472 5473 5474 5475 5476 5477
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
5478
	chan->tx_credits = __le16_to_cpu(req->credits);
5479 5480 5481

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
5482
	credits = chan->rx_credits;
5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
5507
		rsp.mps = cpu_to_le16(chan->mps);
5508 5509 5510 5511 5512 5513
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
5514
	rsp.credits = cpu_to_le16(credits);
5515 5516 5517 5518 5519 5520 5521
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}

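/* LE flow control credits: the peer grants additional transmit credits
 * on a credit-based channel.  The total is capped at
 * LE_FLOWCTL_MAX_CREDITS; an overflow is treated as a protocol error
 * and the channel is disconnected.
 */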
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}

static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}

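/* Dispatch a single LE signaling command to its handler.  Unknown
 * opcodes result in a Command Reject being sent by the caller.
 */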
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

5641 5642 5643
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
5644
	struct hci_conn *hcon = conn->hcon;
5645 5646
	struct l2cap_cmd_hdr *cmd;
	u16 len;
5647 5648
	int err;

5649
	if (hcon->type != LE_LINK)
5650
		goto drop;
5651

5652 5653
	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;
5654

5655 5656
	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5657

5658
	len = le16_to_cpu(cmd->len);
5659

5660
	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5661

5662 5663 5664 5665
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}
5666

5667
	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5668 5669
	if (err) {
		struct l2cap_cmd_rej_unk rej;
5670

5671
		BT_ERR("Wrong link type (%d)", err);
5672

5673
		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5674 5675
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
5676 5677
	}

5678
drop:
5679 5680 5681
	kfree_skb(skb);
}

5682
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5683
				     struct sk_buff *skb)
L
Linus Torvalds 已提交
5684
{
5685
	struct hci_conn *hcon = conn->hcon;
L
Linus Torvalds 已提交
5686 5687 5688
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
5689
	int err;
L
Linus Torvalds 已提交
5690 5691 5692

	l2cap_raw_recv(conn, skb);

5693
	if (hcon->type != ACL_LINK)
5694
		goto drop;
5695

L
Linus Torvalds 已提交
5696
	while (len >= L2CAP_CMD_HDR_SIZE) {
5697
		u16 cmd_len;
L
Linus Torvalds 已提交
5698 5699 5700 5701
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

5702
		cmd_len = le16_to_cpu(cmd.len);
L
Linus Torvalds 已提交
5703

5704 5705
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);
L
Linus Torvalds 已提交
5706

5707
		if (cmd_len > len || !cmd.ident) {
L
Linus Torvalds 已提交
5708 5709 5710 5711
			BT_DBG("corrupted command");
			break;
		}

5712
		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
L
Linus Torvalds 已提交
5713
		if (err) {
5714
			struct l2cap_cmd_rej_unk rej;
5715 5716

			BT_ERR("Wrong link type (%d)", err);
L
Linus Torvalds 已提交
5717

5718
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5719 5720
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
L
Linus Torvalds 已提交
5721 5722
		}

5723 5724
		data += cmd_len;
		len  -= cmd_len;
L
Linus Torvalds 已提交
5725 5726
	}

5727
drop:
L
Linus Torvalds 已提交
5728 5729 5730
	kfree_skb(skb);
}

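/* Verify the Frame Check Sequence of an ERTM/streaming frame.  The two
 * trailing FCS octets are trimmed from the skb and compared against a
 * CRC-16 computed over the L2CAP header and payload.
 */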
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}

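/* Send an S-frame or pending I-frames carrying the F-bit: an RNR while
 * the local side is busy, otherwise any queued I-frames, with a final
 * RR if no other frame ended up carrying the F-bit.
 */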
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}

static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}

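/* Reassemble a segmented SDU according to the SAR bits: an SDU may
 * arrive unsegmented or as a start/continue/end sequence whose total
 * length must match the SDU length carried in the start fragment.
 */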
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
5807 5808
{
	int err = -EINVAL;
5809

5810
	switch (control->sar) {
5811
	case L2CAP_SAR_UNSEGMENTED:
5812 5813
		if (chan->sdu)
			break;
5814

5815
		err = chan->ops->recv(chan, skb);
5816
		break;
5817

5818
	case L2CAP_SAR_START:
5819 5820
		if (chan->sdu)
			break;
5821

5822
		chan->sdu_len = get_unaligned_le16(skb->data);
5823
		skb_pull(skb, L2CAP_SDULEN_SIZE);
5824

5825 5826 5827 5828
		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}
5829

5830 5831
		if (skb->len >= chan->sdu_len)
			break;
5832

5833 5834
		chan->sdu = skb;
		chan->sdu_last_frag = skb;
5835

5836 5837
		skb = NULL;
		err = 0;
5838 5839
		break;

5840
	case L2CAP_SAR_CONTINUE:
5841
		if (!chan->sdu)
5842
			break;
5843

5844 5845 5846
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;
5847

5848 5849
		if (chan->sdu->len >= chan->sdu_len)
			break;
5850

5851
		err = 0;
5852 5853
		break;

5854
	case L2CAP_SAR_END:
5855
		if (!chan->sdu)
5856
			break;
5857

5858 5859 5860
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;
5861

5862 5863
		if (chan->sdu->len != chan->sdu_len)
			break;
5864

5865
		err = chan->ops->recv(chan, chan->sdu);
5866

5867 5868 5869 5870 5871
		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
5872
		}
5873 5874 5875
		break;
	}

5876 5877 5878 5879 5880 5881 5882
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}
5883

5884
	return err;
5885 5886
}

static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}

static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}

static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}

static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}

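/* Classify the TxSeq of a received I-frame relative to the receive
 * window: expected, duplicate, unexpected (a gap that needs SREJ
 * recovery) or invalid.  While SREJ recovery is in progress the extra
 * *_SREJ results distinguish requested retransmissions from stray frames.
 */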
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}

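/* ERTM receive state machine, RECV state: deliver in-sequence I-frames,
 * start SREJ recovery when a sequence gap is detected, and react to
 * RR/RNR/REJ/SREJ s-frames from the peer.
 */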
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}

static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}

static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}

static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}

static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}

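/* Entry point of the ERTM receive state machine: validate ReqSeq and
 * dispatch the event to the handler for the current rx_state.  An
 * invalid ReqSeq is a protocol error and tears the channel down.
 */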
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}

static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}

static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

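/* Top up the peer's LE flow-control credits once our receive credit
 * count drops below half of le_max_credits, by sending an LE Flow
 * Control Credit packet for the difference.
 */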
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}

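/* Receive path for LE credit-based channels: each PDU consumes one
 * credit, the first PDU of an SDU starts with a 16-bit SDU length, and
 * fragments are accumulated in chan->sdu until the full SDU can be
 * handed to the upper layer.
 */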
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}

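/* Deliver a PDU received on a dynamically allocated CID to the owning
 * channel, handing it to the receive routine that matches the channel
 * mode (LE flow control, basic, ERTM or streaming).
 */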
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}

static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}

static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}

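/* Dispatch one complete L2CAP frame based on its CID: signalling, the
 * connectionless and ATT fixed channels, LE signalling and SMP are
 * handled here, everything else is treated as connection-oriented data.
 * Frames that arrive before the link is fully connected are queued.
 */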
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}

static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}

static bool is_valid_psm(u16 psm, u8 dst_type) {
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}

int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);

/* ---- L2CAP interface with lower layer (HCI) ---- */

int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (!status) {
		conn = l2cap_conn_add(hcon);
		if (conn)
			l2cap_conn_ready(conn);
	} else {
		l2cap_conn_del(hcon, bt_to_errno(status));
	}
}

int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

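/* Called by the HCI core when an authentication/encryption change
 * completes.  Walk all channels on the connection and either resume
 * data, continue connection setup, or send the pending connect response
 * that was blocked on security.
 */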
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}

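/* Reassemble ACL data fragments from the HCI layer into complete L2CAP
 * frames: a start fragment supplies the expected length from the basic
 * header, continuation fragments are appended to conn->rx_skb until the
 * frame is complete and can be passed to l2cap_recv_frame().
 */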
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}

static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, &c->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");