/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

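/* HCI_Reset completed: drop all volatile controller state (discovery,
 * advertising and scan response data, scan parameters, LE white list)
 * so a subsequent init starts from known defaults.
 */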
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

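/* Scan Enable is a bitmask: SCAN_INQUIRY toggles inquiry scan (ISCAN)
 * and SCAN_PAGE toggles page scan (PSCAN); a failed write also clears
 * the pending discoverable timeout.
 */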
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

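/* Local feature bits decide which ACL and (e)SCO packet types the
 * controller supports; fold them into hdev->pkt_type and esco_type.
 */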
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

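/* Buffer Size tells us how many outstanding ACL/SCO packets the
 * controller can queue; the counters seeded here gate the host TX path.
 */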
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

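/* HCI_Read_Clock: "which" == 0x00 reports the local clock; any other
 * value reports the piconet clock (and accuracy) of the connection
 * identified by the handle.
 */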
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

782
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
783
				       struct sk_buff *skb)
784 785 786
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

787
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
788 789

	if (rp->status)
790
		goto a2mp_rsp;
791 792 793 794 795 796 797 798 799 800 801 802

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

803 804
a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
805 806
}

807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
823
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
841
	a2mp_send_create_phy_link_req(hdev, rp->status);
842 843
}

844
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
845
					 struct sk_buff *skb)
846
{
847
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
848

849
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850

851 852 853 854
	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
855 856
}

857 858 859 860 861 862
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

863
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
864

865 866
	hci_dev_lock(hdev);

867
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
868
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
869

870
	if (rp->status)
871
		goto unlock;
872 873 874

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
875
		goto unlock;
876 877 878 879

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;
880 881 882

unlock:
	hci_dev_unlock(hdev);
883 884 885 886 887 888
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

889
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890

891 892
	hci_dev_lock(hdev);

893
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
894
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
895
						 rp->status);
896 897

	hci_dev_unlock(hdev);
898
}
899

900 901 902 903 904
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

905
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906 907 908 909 910 911 912 913 914 915 916

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
917

918 919 920 921 922 923 924
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

925 926 927 928
	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
929 930
}

931 932 933 934 935 936 937
static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

938 939 940 941
	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
942 943
}

944 945 946 947
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

948
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
949

950 951
	hci_dev_lock(hdev);

952
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
953 954
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);
955 956

	hci_dev_unlock(hdev);
957 958 959
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
960
					  struct sk_buff *skb)
961 962 963
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

964
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
965

966 967
	hci_dev_lock(hdev);

968
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
969
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
970
						     ACL_LINK, 0, rp->status);
971 972

	hci_dev_unlock(hdev);
973 974
}

975 976 977 978
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

979
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
980 981 982

	hci_dev_lock(hdev);

983
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
984
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
985
						 0, rp->status);
986 987 988 989 990

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
991
					  struct sk_buff *skb)
992 993 994
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

995
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996 997 998

	hci_dev_lock(hdev);

999
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1000
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1001
						     ACL_LINK, 0, rp->status);
1002 1003 1004 1005

	hci_dev_unlock(hdev);
}

1006 1007
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
1008 1009 1010
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

1011
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1012

1013
	hci_dev_lock(hdev);
1014 1015
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
					  rp->status);
1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
1027 1028
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
					  rp->hash256, rp->rand256,
1029
					  rp->status);
1030
	hci_dev_unlock(hdev);
1031 1032
}

1033 1034 1035 1036 1037 1038 1039 1040

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1041 1042 1043
	if (status)
		return;

1044 1045 1046 1047 1048 1049
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

1050
	bacpy(&hdev->random_addr, sent);
1051 1052 1053 1054

	hci_dev_unlock(hdev);
}

1055 1056 1057 1058 1059 1060
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1061
	if (status)
1062 1063
		return;

1064 1065
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
1066 1067
		return;

1068 1069
	hci_dev_lock(hdev);

S
Stephen Hemminger 已提交
1070
	/* If we're doing connection initiation as peripheral. Set a
1071 1072 1073 1074 1075
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

1076 1077
		set_bit(HCI_LE_ADV, &hdev->dev_flags);

1078 1079 1080 1081
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
1082
					   conn->conn_timeout);
1083 1084
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1085 1086
	}

1087
	hci_dev_unlock(hdev);
1088 1089
}

1090 1091 1092 1093 1094 1095 1096
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1097 1098 1099
	if (status)
		return;

1100 1101 1102 1103 1104 1105
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

1106
	hdev->le_scan_type = cp->type;
1107 1108 1109 1110

	hci_dev_unlock(hdev);
}

1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1127 1128
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
1129 1130 1131 1132 1133
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
1134
	d->last_adv_rssi = rssi;
1135
	d->last_adv_flags = flags;
1136 1137 1138 1139
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

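/* LE scan enable/disable completions drive the discovery state machine;
 * disabling the scan is also where any buffered advertising report is
 * flushed up to the management interface.
 */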
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning because of a connect request, so
		 * mark discovery as stopped. If scanning was not
		 * interrupted for that reason, advertising might have
		 * been disabled by active scanning, so re-enable it
		 * again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}

1210 1211 1212 1213 1214 1215 1216
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

1217 1218 1219 1220
	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
1221 1222
}

1223 1224 1225 1226 1227 1228 1229
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1230 1231 1232
	if (status)
		return;

1233
	hci_bdaddr_list_clear(&hdev->le_white_list);
1234 1235 1236 1237 1238 1239 1240 1241 1242 1243
}

static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1244 1245 1246
	if (status)
		return;

1247 1248 1249 1250
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

1251 1252
	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

1263 1264 1265
	if (status)
		return;

1266 1267 1268 1269
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

1270 1271
	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
1272 1273
}

1274 1275 1276 1277 1278 1279 1280
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

1281 1282 1283 1284
	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
1285 1286
}

1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

1336 1337
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
1338
{
1339
	struct hci_cp_write_le_host_supported *sent;
1340 1341
	__u8 status = *((__u8 *) skb->data);

1342
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1343

1344 1345 1346
	if (status)
		return;

1347
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1348
	if (!sent)
1349 1350
		return;

1351 1352
	hci_dev_lock(hdev);

1353 1354 1355 1356 1357 1358 1359
	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1360
	}
1361 1362 1363 1364 1365

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1366 1367

	hci_dev_unlock(hdev);
1368 1369
}

1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}

1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1440 1441 1442 1443 1444
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
1445
		conn->tx_power = rp->tx_power;
1446 1447 1448 1449 1450
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}
1451

1452
unlock:
1453 1454 1455
	hci_dev_unlock(hdev);
}

1456
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1457
{
1458
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1459 1460 1461

	if (status) {
		hci_conn_check_pending(hdev);
1462 1463 1464
		return;
	}

1465
	set_bit(HCI_INQUIRY, &hdev->flags);
L
Linus Torvalds 已提交
1466 1467
}

1468
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
L
Linus Torvalds 已提交
1469
{
1470
	struct hci_cp_create_conn *cp;
L
Linus Torvalds 已提交
1471 1472
	struct hci_conn *conn;

1473
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1474 1475

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
L
Linus Torvalds 已提交
1476 1477 1478 1479 1480 1481 1482
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

1483
	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
L
Linus Torvalds 已提交
1484 1485 1486

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
1487 1488 1489 1490 1491 1492
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
L
Linus Torvalds 已提交
1493 1494 1495
		}
	} else {
		if (!conn) {
1496 1497 1498
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
1499
				BT_ERR("No memory for new connection");
L
Linus Torvalds 已提交
1500 1501 1502 1503 1504 1505
		}
	}

	hci_dev_unlock(hdev);
}

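/* Command Status for Add SCO Connection: a non-zero status means the
 * SCO link never came up, so tear down the half-created SCO hci_conn.
 */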
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
L
Linus Torvalds 已提交
1540

1541 1542 1543 1544 1545
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

1546
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
1561
			hci_conn_drop(conn);
1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

1573
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
1588
			hci_conn_drop(conn);
1589 1590 1591 1592 1593 1594
		}
	}

	hci_dev_unlock(hdev);
}

1595
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1596
				    struct hci_conn *conn)
1597 1598 1599 1600
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

1601
	if (conn->pending_sec_level == BT_SECURITY_SDP)
1602 1603 1604
		return 0;

	/* Only request authentication for SSP connections or non-SSP
1605 1606 1607
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
1608
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1609
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1610 1611
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1612 1613 1614 1615 1616
		return 0;

	return 1;
}

1617
static int hci_resolve_name(struct hci_dev *hdev,
1618
				   struct inquiry_entry *e)
1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

1632
static bool hci_resolve_next_name(struct hci_dev *hdev)
1633 1634 1635 1636
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

1637 1638 1639 1640
	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1641 1642 1643
	if (!e)
		return false;

1644 1645 1646 1647 1648 1649 1650 1651 1652
	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1653
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1654 1655 1656 1657
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

1658 1659 1660 1661 1662 1663 1664
	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1665
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1666
		mgmt_device_connected(hdev, conn, 0, name, name_len);
1667 1668 1669 1670

	if (discov->state == DISCOVERY_STOPPED)
		return;

1671 1672 1673 1674 1675 1676 1677
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1678 1679 1680 1681 1682 1683 1684 1685 1686
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
1687
		e->name_state = NAME_KNOWN;
1688 1689
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
1690 1691
	} else {
		e->name_state = NAME_NOT_KNOWN;
1692 1693
	}

1694
	if (hci_resolve_next_name(hdev))
1695 1696 1697 1698 1699 1700
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

1701 1702
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
1703 1704 1705
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

1706
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

1719 1720
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

1721
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1722
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1723

1724 1725 1726 1727 1728 1729
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

1730
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1731 1732
		struct hci_cp_auth_requested auth_cp;

1733 1734
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

1735 1736 1737
		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
1738 1739
	}

1740
unlock:
1741
	hci_dev_unlock(hdev);
1742
}
L
Linus Torvalds 已提交
1743

1744 1745 1746 1747 1748
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

1749
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
1764
			hci_conn_drop(conn);
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

1776
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
1791
			hci_conn_drop(conn);
1792 1793 1794 1795 1796 1797
		}
	}

	hci_dev_unlock(hdev);
}

1798 1799
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
1800 1801 1802 1803
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

1804
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1805 1806 1807 1808 1809 1810 1811 1812 1813 1814

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

1815
	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1816 1817 1818 1819

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
1820 1821 1822 1823
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;
1824

1825 1826 1827
			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
1828 1829 1830
	}

	hci_dev_unlock(hdev);
L
Linus Torvalds 已提交
1831 1832
}

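/* Sniff mode command status: on failure clear the pending mode-change
 * flag and, if a SCO setup was waiting on the mode change, let it
 * proceed with the error.
 */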
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1904
				       conn->dst_type, status);
1905 1906 1907 1908

	hci_dev_unlock(hdev);
}

1909 1910
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
1911 1912
	struct hci_cp_create_phy_link *cp;

1913
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1914 1915 1916 1917 1918

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931
	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
1932 1933
}

1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}

1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

1987 1988 1989 1990 1991 1992 1993 1994
	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
1995
				   conn->conn_timeout);
1996

1997 1998 1999 2000
unlock:
	hci_dev_unlock(hdev);
}

2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

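/* Command status handler for HCI_OP_SWITCH_ROLE. On failure the
 * pending role-switch flag is cleared again, since the controller
 * will not carry out the requested switch.
 */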
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

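/* Inquiry Complete event: clear the HCI_INQUIRY flag, wake up anyone
 * waiting on it and, when mgmt-driven discovery is active, either move
 * on to remote name resolution or mark discovery as stopped.
 */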
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

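/* Standard Inquiry Result event: each response is copied into the
 * inquiry cache (no RSSI or EIR data is available in this event
 * format) and forwarded to user space via mgmt_device_found().
 */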
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= HCI_RSSI_INVALID;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

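/* Connection Complete event for BR/EDR ACL and SCO links. On success
 * the handle and state are recorded, debugfs/sysfs entries are created
 * and the remote features are requested for ACL links; on failure the
 * connect attempt is reported to mgmt and the connection torn down.
 */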
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

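/* Reject an incoming connection request with the "unacceptable
 * BD_ADDR" reason code.
 */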
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}

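/* Connection Request event: the request is rejected if no protocol
 * accepts it, if the peer is blacklisted, or if mgmt is in use and the
 * device is neither connectable nor whitelisted. Otherwise an ACL or
 * (e)SCO accept command is sent, or the decision is deferred to the
 * protocol layer when HCI_PROTO_DEFER is set.
 */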
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}

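/* Map an HCI disconnect reason to the reason code reported over the
 * mgmt interface; for example HCI_ERROR_REMOTE_USER_TERM becomes
 * MGMT_DEV_DISCONN_REMOTE. Anything unrecognized is reported as
 * MGMT_DEV_DISCONN_UNKNOWN.
 */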
static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

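/* Disconnection Complete event: notify mgmt, drop the stored link key
 * if it was marked for flushing, re-add auto-connect LE devices to the
 * pending connection list and, for LE links, re-enable advertising
 * that the connection may have disabled.
 */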
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED,
					    &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}

2416
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
2417
{
2418
	struct hci_ev_auth_complete *ev = (void *) skb->data;
2419
	struct hci_conn *conn;
L
Linus Torvalds 已提交
2420

2421
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
L
Linus Torvalds 已提交
2422 2423 2424

	hci_dev_lock(hdev);

2425
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2426 2427 2428 2429
	if (!conn)
		goto unlock;

	if (!ev->status) {
2430
		if (!hci_conn_ssp_enabled(conn) &&
2431
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2432
			BT_INFO("re-auth of legacy device is not possible.");
2433
		} else {
2434
			set_bit(HCI_CONN_AUTH, &conn->flags);
2435
			conn->sec_level = conn->pending_sec_level;
2436
		}
2437
	} else {
2438
		mgmt_auth_failed(conn, ev->status);
2439
	}
L
Linus Torvalds 已提交
2440

2441 2442
	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
L
Linus Torvalds 已提交
2443

2444
	if (conn->state == BT_CONFIG) {
2445
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2446 2447 2448 2449
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2450
				     &cp);
2451
		} else {
2452 2453
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
2454
			hci_conn_drop(conn);
2455
		}
2456 2457
	} else {
		hci_auth_cfm(conn, ev->status);
2458

2459 2460
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2461
		hci_conn_drop(conn);
2462 2463
	}

2464
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2465 2466 2467 2468 2469
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2470
				     &cp);
2471
		} else {
2472
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2473
			hci_encrypt_cfm(conn, ev->status, 0x00);
L
Linus Torvalds 已提交
2474 2475 2476
		}
	}

2477
unlock:
L
Linus Torvalds 已提交
2478 2479 2480
	hci_dev_unlock(hdev);
}

2481
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
2482
{
2483 2484 2485
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

2486
	BT_DBG("%s", hdev->name);
L
Linus Torvalds 已提交
2487

2488
	hci_conn_check_pending(hdev);
2489 2490 2491

	hci_dev_lock(hdev);

2492
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2493

2494 2495
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;
2496

2497 2498
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2499
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2500 2501 2502 2503
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
2504 2505 2506 2507 2508 2509
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

2510
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2511
		struct hci_cp_auth_requested cp;
2512 2513 2514

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

2515 2516 2517 2518
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

2519
unlock:
2520
	hci_dev_unlock(hdev);
2521 2522
}

2523
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2524 2525 2526 2527
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

2528
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
L
Linus Torvalds 已提交
2529 2530 2531

	hci_dev_lock(hdev);

2532
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2533 2534
	if (!conn)
		goto unlock;
L
Linus Torvalds 已提交
2535

2536 2537 2538
	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
2539 2540
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2541
			conn->sec_level = conn->pending_sec_level;
2542

2543 2544
			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2545
				set_bit(HCI_CONN_FIPS, &conn->flags);
2546

2547 2548 2549 2550
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
2551
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2552 2553
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
2554
	}
2555

2556 2557 2558 2559 2560 2561
	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

2562
	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2563

2564 2565 2566 2567
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
L
Linus Torvalds 已提交
2568 2569
	}

2570 2571 2572 2573
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585
		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

2586 2587 2588 2589 2590
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

2591
unlock:
L
Linus Torvalds 已提交
2592 2593 2594
	hci_dev_unlock(hdev);
}

2595 2596
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
L
Linus Torvalds 已提交
2597
{
2598
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2599
	struct hci_conn *conn;
L
Linus Torvalds 已提交
2600

2601
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
L
Linus Torvalds 已提交
2602 2603 2604

	hci_dev_lock(hdev);

2605
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
L
Linus Torvalds 已提交
2606 2607
	if (conn) {
		if (!ev->status)
2608
			set_bit(HCI_CONN_SECURE, &conn->flags);
L
Linus Torvalds 已提交
2609

2610
		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
L
Linus Torvalds 已提交
2611 2612 2613 2614 2615 2616 2617

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

2618 2619
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
L
Linus Torvalds 已提交
2620
{
2621 2622 2623
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

2624
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2625 2626 2627 2628

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2629 2630
	if (!conn)
		goto unlock;
2631

2632
	if (!ev->status)
2633
		memcpy(conn->features[0], ev->features, 8);
2634 2635 2636 2637 2638 2639 2640 2641 2642

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2643
			     sizeof(cp), &cp);
2644 2645 2646
		goto unlock;
	}

2647
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2648 2649 2650 2651 2652
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2653
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2654
		mgmt_device_connected(hdev, conn, 0, NULL, 0);
2655

2656
	if (!hci_outgoing_auth_needed(hdev, conn)) {
2657 2658
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
2659
		hci_conn_drop(conn);
2660
	}
2661

2662
unlock:
2663
	hci_dev_unlock(hdev);
L
Linus Torvalds 已提交
2664 2665
}

2666
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2667 2668
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2669
	u8 status = skb->data[sizeof(*ev)];
2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

2681 2682 2683 2684
	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696
	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

2697 2698 2699 2700
	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

2701 2702 2703 2704
	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

2705 2706 2707 2708 2709 2710 2711 2712
	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752
	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

2753 2754 2755 2756
	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

2757 2758 2759 2760
	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

2761 2762 2763 2764
	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776
	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

2777 2778 2779 2780
	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

2781 2782 2783 2784 2785 2786 2787 2788
	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

2789 2790 2791 2792
	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

2793 2794 2795 2796
	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

2797 2798 2799 2800
	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

2801 2802 2803 2804
	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

2805 2806 2807 2808
	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

2809 2810 2811 2812
	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

2813 2814 2815 2816
	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

2817 2818 2819 2820
	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

2821 2822 2823 2824
	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

2825 2826 2827 2828
	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

2829 2830 2831 2832 2833 2834 2835 2836
	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

2837
	case HCI_OP_READ_LOCAL_OOB_DATA:
2838 2839 2840 2841 2842
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
2843 2844
		break;

2845 2846 2847 2848
	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

2849 2850 2851 2852
	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

2853 2854 2855 2856
	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

2857 2858 2859 2860 2861 2862 2863 2864
	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

2865 2866 2867 2868 2869 2870
	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
2871
		break;
2872

2873 2874 2875 2876
	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

2877 2878 2879 2880
	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

2881 2882 2883 2884
	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

2885 2886 2887 2888
	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

2889 2890 2891 2892
	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904
	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

2905 2906 2907 2908
	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920
	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

2921 2922 2923 2924
	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

2925 2926 2927 2928
	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

2929 2930 2931 2932
	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

2933 2934 2935 2936
	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

2937 2938 2939 2940
	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

2941
	default:
2942
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2943 2944 2945
		break;
	}

2946
	if (opcode != HCI_OP_NOP)
2947
		cancel_delayed_work(&hdev->cmd_timer);
2948

2949
	hci_req_cmd_complete(hdev, opcode, status);
2950

2951
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2952 2953
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
2954
			queue_work(hdev->workqueue, &hdev->cmd_work);
2955 2956 2957
	}
}

2958
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

2976 2977 2978 2979
	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

2980 2981 2982 2983
	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

2984 2985 2986 2987 2988 2989 2990 2991
	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

2992 2993 2994 2995
	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

2996 2997 2998 2999 3000 3001 3002 3003
	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

3004 3005 3006 3007
	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

3008 3009 3010 3011 3012 3013 3014 3015
	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

3016 3017 3018 3019 3020 3021 3022 3023
	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

3024 3025 3026 3027
	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

3028 3029 3030 3031
	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

3032 3033 3034 3035
	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

3036
	default:
3037
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3038 3039 3040
		break;
	}

3041
	if (opcode != HCI_OP_NOP)
3042
		cancel_delayed_work(&hdev->cmd_timer);
3043

3044 3045 3046
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);
3047

3048
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3049 3050
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
3051
			queue_work(hdev->workqueue, &hdev->cmd_work);
3052 3053 3054
	}
}

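/* Hardware Error event: the error code reported by the controller is
 * simply logged; no recovery is attempted here.
 */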
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
}

3062
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3063 3064 3065 3066
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

3067
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3068 3069 3070 3071 3072

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
3073 3074
		if (!ev->status)
			conn->role = ev->role;
3075

3076
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3077 3078 3079 3080 3081 3082 3083

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

3084
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3085 3086 3087 3088
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

3089 3090 3091 3092 3093
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

3094
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3095
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3096 3097 3098 3099
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

3100 3101
	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

3102 3103
	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
3104 3105 3106
		struct hci_conn *conn;
		__u16  handle, count;

3107 3108
		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);
3109 3110

		conn = hci_conn_hash_lookup_handle(hdev, handle);
3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
A
Andrei Emeltchenko 已提交
3129 3130
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
3131 3132
					hdev->acl_cnt = hdev->acl_pkts;
			}
3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
3144 3145 3146
		}
	}

3147
	queue_work(hdev->workqueue, &hdev->tx_work);
3148 3149
}

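/* Resolve a completed-blocks handle to a connection: on a BR/EDR
 * controller the handle identifies the connection directly, while on
 * an AMP controller it identifies a logical channel whose owning
 * connection is returned instead.
 */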
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		break;
	}

	return NULL;
}

3171
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3172 3173 3174 3175 3176 3177 3178 3179 3180 3181
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3182
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3183 3184 3185 3186 3187
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3188
	       ev->num_hndl);
3189 3190 3191

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
3192
		struct hci_conn *conn = NULL;
3193 3194 3195 3196 3197
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

3198
		conn = __hci_conn_lookup_handle(hdev, handle);
3199 3200 3201 3202 3203 3204 3205
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
3206
		case AMP_LINK:
3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

3221
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3222
{
3223
	struct hci_ev_mode_change *ev = (void *) skb->data;
3224 3225
	struct hci_conn *conn;

3226
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3227 3228 3229 3230

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3231 3232 3233
	if (conn) {
		conn->mode = ev->mode;

3234 3235
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
3236
			if (conn->mode == HCI_CM_ACTIVE)
3237
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3238
			else
3239
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3240
		}
3241

3242
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3243
			hci_sco_setup(conn, ev->status);
3244 3245 3246 3247 3248
	}

	hci_dev_unlock(hdev);
}

3249
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3250
{
3251 3252 3253
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

3254
	BT_DBG("%s", hdev->name);
3255 3256 3257 3258

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3259 3260 3261 3262
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
3263 3264
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3265
		hci_conn_drop(conn);
3266 3267
	}

3268
	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3269
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3270
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3271
			     sizeof(ev->bdaddr), &ev->bdaddr);
3272
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3273 3274 3275 3276 3277 3278 3279
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

3280
		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3281
	}
3282

3283
unlock:
3284
	hci_dev_unlock(hdev);
3285 3286
}

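/* Cache the link key type and PIN length on the connection and derive
 * the pending security level from them: authenticated P-256 keys map
 * to FIPS, authenticated P-192 keys to high, unauthenticated keys to
 * medium and 16-digit PIN based combination keys to high. Changed
 * combination keys are left untouched here; their real type is fixed
 * up when the key is stored.
 */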
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}

3319
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3320
{
3321 3322 3323 3324 3325
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

3326
	BT_DBG("%s", hdev->name);
3327

3328
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3329 3330 3331 3332 3333 3334
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
3335 3336
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
3337 3338 3339
		goto not_found;
	}

3340 3341
	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);
3342 3343

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3344
	if (conn) {
3345 3346
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

3347 3348
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3349
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3350 3351 3352
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}
3353

3354
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3355 3356
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
3357 3358
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
3359 3360 3361
			goto not_found;
		}

3362
		conn_set_key(conn, key->type, key->pin_len);
3363 3364 3365
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
3366
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3367 3368 3369 3370 3371 3372 3373 3374 3375 3376

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
3377 3378
}

3379
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3380
{
3381 3382
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
3383 3384
	struct link_key *key;
	bool persistent;
3385
	u8 pin_len = 0;
3386

3387
	BT_DBG("%s", hdev->name);
3388 3389 3390 3391

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3392 3393 3394 3395 3396 3397 3398
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

3399
	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3400
	conn_set_key(conn, ev->key_type, conn->pin_length);
3401

3402 3403 3404 3405 3406 3407 3408 3409
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

3410 3411 3412 3413 3414 3415
	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

3416
	mgmt_new_link_key(hdev, key, persistent);
3417

3418 3419 3420 3421 3422 3423 3424
	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3425 3426
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
3427
		goto unlock;
3428
	}
3429

3430 3431 3432 3433 3434
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

3435
unlock:
3436
	hci_dev_unlock(hdev);
3437 3438
}

3439
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
3440
{
3441
	struct hci_ev_clock_offset *ev = (void *) skb->data;
3442
	struct hci_conn *conn;
L
Linus Torvalds 已提交
3443

3444
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
L
Linus Torvalds 已提交
3445 3446 3447

	hci_dev_lock(hdev);

3448
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
L
Linus Torvalds 已提交
3449 3450 3451
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

3452 3453
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
L
Linus Torvalds 已提交
3454 3455 3456 3457 3458 3459 3460 3461
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

3462
static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3463 3464 3465 3466
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

3467
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3468 3469 3470 3471 3472 3473 3474 3475 3476 3477

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

3478
static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3479
{
3480
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3481 3482 3483 3484 3485 3486
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

3487 3488
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
3489 3490 3491 3492 3493 3494 3495
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

3496 3497
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
3498 3499 3500 3501 3502 3503 3504 3505 3506
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

3507 3508 3509
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

3510 3511 3512
	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3513 3514
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);
3515

3516
		for (; num_rsp; num_rsp--, info++) {
3517 3518
			u32 flags;

3519 3520 3521 3522 3523 3524 3525
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
3526
			data.ssp_mode		= 0x00;
3527

3528 3529
			flags = hci_inquiry_cache_update(hdev, &data, false);

3530
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3531
					  info->dev_class, info->rssi,
3532
					  flags, NULL, 0, NULL, 0);
3533 3534 3535 3536
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

3537
		for (; num_rsp; num_rsp--, info++) {
3538 3539
			u32 flags;

3540 3541 3542 3543 3544 3545 3546
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
3547
			data.ssp_mode		= 0x00;
3548 3549 3550

			flags = hci_inquiry_cache_update(hdev, &data, false);

3551
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3552
					  info->dev_class, info->rssi,
3553
					  flags, NULL, 0, NULL, 0);
3554 3555 3556 3557 3558 3559
		}
	}

	hci_dev_unlock(hdev);
}

3560 3561
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
3562
{
3563 3564 3565
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

3566
	BT_DBG("%s", hdev->name);
3567 3568 3569 3570

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3571 3572
	if (!conn)
		goto unlock;
3573

3574 3575 3576
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

3577 3578
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;
3579

3580 3581
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
3582
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3583

3584
		if (ev->features[0] & LMP_HOST_SSP) {
3585
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
3597 3598 3599

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3600 3601 3602 3603 3604
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

3605
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3606 3607 3608 3609 3610
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3611
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3612
		mgmt_device_connected(hdev, conn, 0, NULL, 0);
3613

3614
	if (!hci_outgoing_auth_needed(hdev, conn)) {
3615 3616
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
3617
		hci_conn_drop(conn);
3618 3619
	}

3620
unlock:
3621
	hci_dev_unlock(hdev);
3622 3623
}

3624 3625
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
3626
{
3627 3628 3629
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

3630
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3631 3632 3633 3634

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3635 3636 3637 3638 3639 3640 3641 3642 3643 3644
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}
3645

3646 3647
	switch (ev->status) {
	case 0x00:
3648 3649
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
3650

3651
		hci_debugfs_create_conn(conn);
3652
		hci_conn_add_sysfs(conn);
3653 3654
		break;

3655
	case 0x10:	/* Connection Accept Timeout */
3656
	case 0x0d:	/* Connection Rejected due to Limited Resources */
3657
	case 0x11:	/* Unsupported Feature or Parameter Value */
3658
	case 0x1c:	/* SCO interval rejected */
3659
	case 0x1a:	/* Unsupported Remote Feature */
3660
	case 0x1f:	/* Unspecified error */
3661
	case 0x20:	/* Unsupported LMP Parameter value */
3662
		if (conn->out) {
3663 3664
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
3665 3666
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
3667 3668 3669 3670
		}
		/* fall through */

	default:
3671
		conn->state = BT_CLOSED;
3672 3673
		break;
	}
3674 3675 3676 3677 3678 3679 3680

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
3681 3682
}

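/* Return the number of significant EIR bytes, i.e. the length of all
 * length-prefixed fields up to the first zero-length field. If no
 * terminating field is found the full buffer length is returned.
 */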
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

3700 3701
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
L
Linus Torvalds 已提交
3702
{
3703 3704 3705
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
3706
	size_t eir_len;
L
Linus Torvalds 已提交
3707

3708
	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
L
Linus Torvalds 已提交
3709

3710 3711
	if (!num_rsp)
		return;
L
Linus Torvalds 已提交
3712

3713 3714 3715
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

3716 3717
	hci_dev_lock(hdev);

3718
	for (; num_rsp; num_rsp--, info++) {
3719 3720
		u32 flags;
		bool name_known;
3721

3722
		bacpy(&data.bdaddr, &info->bdaddr);
3723 3724 3725
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
3726
		memcpy(data.dev_class, info->dev_class, 3);
3727 3728
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
3729
		data.ssp_mode		= 0x01;
3730

3731
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3732
			name_known = eir_has_data_type(info->data,
3733 3734
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
3735 3736 3737
		else
			name_known = true;

3738 3739
		flags = hci_inquiry_cache_update(hdev, &data, name_known);

3740
		eir_len = eir_get_length(info->data, sizeof(info->data));
3741

3742
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3743 3744
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
3745 3746 3747 3748
	}

	hci_dev_unlock(hdev);
}
L
Linus Torvalds 已提交
3749

3750 3751 3752 3753 3754 3755
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

3756
	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3757 3758 3759 3760 3761 3762 3763 3764
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

3765 3766 3767 3768 3769 3770
	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

3771 3772 3773 3774 3775 3776
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
A
Andre Guedes 已提交
3777
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3778
		hci_conn_drop(conn);
3779 3780 3781 3782 3783 3784 3785 3786
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
3787
		hci_conn_drop(conn);
3788 3789 3790 3791 3792
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3793
		hci_conn_drop(conn);
3794 3795 3796 3797 3798 3799
	}

unlock:
	hci_dev_unlock(hdev);
}

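/* Work out the authentication requirements to use in an IO Capability
 * Reply: follow the remote side's no-bonding request, require MITM
 * protection when both sides have usable IO capabilities, and
 * otherwise strip the MITM bit since it cannot be satisfied.
 */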
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

3818
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3819 3820 3821 3822 3823 3824 3825 3826 3827
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3828 3829 3830 3831 3832
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

3833
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3834 3835
		goto unlock;

3836 3837 3838
	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
3839
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3840
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3841
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3842 3843 3844
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
3845 3846 3847
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
3848
				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3849 3850 3851

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
3852
			/* Request MITM protection if our IO caps allow it
3853
			 * except for the no-bonding case.
3854
			 */
3855
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3856
			    conn->auth_type != HCI_AT_NO_BONDING)
3857
				conn->auth_type |= 0x01;
3858 3859 3860
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}
3861

3862 3863 3864 3865 3866 3867 3868 3869
		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

3870
		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
3871
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3872 3873 3874 3875
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

3876
		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3877
			     sizeof(cp), &cp);
3878 3879 3880 3881
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
3882
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3883

3884
		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3885
			     sizeof(cp), &cp);
3886 3887 3888 3889 3890 3891
	}

unlock:
	hci_dev_unlock(hdev);
}

3892
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;
	if (ev->oob_data)
		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

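	/* Bit 0 of the stored authentication requirements indicates whether
	 * each side asked for MITM protection.
	 */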
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

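	/* Track how many passkey digits the remote side has entered so far;
	 * the running count is forwarded to user space below.
	 */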
	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event is always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event.
	 */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

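	/* When BR/EDR Secure Connections is enabled, the extended reply
	 * carrying both the P-192 and P-256 values is used; otherwise only
	 * the P-192 hash and randomizer are returned.
	 */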
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (data) {
		if (bredr_sc_enabled(hdev)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.rand, data->rand192, sizeof(cp.rand));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

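	/* The BR/EDR connection that set up the AMP manager provides the
	 * peer address for the new physical link.
	 */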
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}

static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

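		/* Switch the L2CAP connection MTU to the AMP controller's
		 * block MTU before confirming the logical link.
		 */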
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}

static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

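	/* Record the connection parameters that are now in effect for this
	 * connection.
	 */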
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave devices are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at a time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}

static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

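		/* A single byte RSSI value follows the variable length
		 * advertising data in each report.
		 */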
		rssi = ev->data[ev->length];
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, NULL, 0, rssi,
				   ev->data, ev->length);

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

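	/* If we are master on this connection, also update any stored
	 * connection parameters and let user space know about the new
	 * values via mgmt_new_conn_param() before replying.
	 */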
	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

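	/* Direct advertising reports carry no advertising data, only the
	 * addresses involved and the RSSI.
	 */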
	while (num_reports--) {
		struct hci_ev_le_direct_adv_info *ev = ptr;

		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0);

		ptr += sizeof(*ev);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		break;
	}
}

static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

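	/* If the last sent command is waiting for this particular event,
	 * complete the pending request now.
	 */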
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}