/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

61 62 63 64 65 66
/* debugfs read handler: report whether Device Under Test mode is
 * enabled as "Y\n" or "N\n".
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler: toggle Device Under Test mode. Sends the
 * Enable_Device_Under_Test_Mode or Reset HCI command synchronously and
 * flips the HCI_DUT_MODE flag on success.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

/* debugfs file operations for the "dut_mode" entry. */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

121 122 123 124 125 126
/* debugfs read handler: report whether vendor diagnostic mode is
 * enabled as "Y\n" or "N\n".
 */
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler: toggle vendor diagnostic mode through the
 * driver's set_diag callback and update the HCI_VENDOR_DIAG flag.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

/* debugfs file operations for the "vendor_diag" entry. */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

182 183 184 185 186 187 188 189 190 191
/* Create the basic debugfs entries: "dut_mode" always, and
 * "vendor_diag" only when the driver provides a set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

192
static int hci_reset_req(struct hci_request *req, unsigned long opt)
L
Linus Torvalds 已提交
193
{
194
	BT_DBG("%s %ld", req->hdev->name, opt);
L
Linus Torvalds 已提交
195 196

	/* Reset device */
197 198
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
199
	return 0;
L
Linus Torvalds 已提交
200 201
}

202
static void bredr_init(struct hci_request *req)
L
Linus Torvalds 已提交
203
{
204
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
205

L
Linus Torvalds 已提交
206
	/* Read Local Supported Features */
207
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
L
Linus Torvalds 已提交
208

209
	/* Read Local Version */
210
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211 212

	/* Read BD Address */
213
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
L
Linus Torvalds 已提交
214 215
}

216
static void amp_init1(struct hci_request *req)
217
{
218
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
219

220
	/* Read Local Version */
221
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
222

223 224 225
	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

226
	/* Read Local AMP Info */
227
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
228 229

	/* Read Data Blk size */
230
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
231

232 233 234
	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

235 236
	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
237 238
}

239
static int amp_init2(struct hci_request *req)
240 241 242 243 244 245 246
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
247 248

	return 0;
249 250
}

251
static int hci_init1_req(struct hci_request *req, unsigned long opt)
252
{
253
	struct hci_dev *hdev = req->hdev;
254 255 256

	BT_DBG("%s %ld", hdev->name, opt);

257 258
	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259
		hci_reset_req(req, 0);
260

261 262
	switch (hdev->dev_type) {
	case HCI_BREDR:
263
		bredr_init(req);
264 265 266
		break;

	case HCI_AMP:
267
		amp_init1(req);
268 269 270 271 272 273
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
274 275

	return 0;
276 277
}

278
static void bredr_setup(struct hci_request *req)
279 280 281 282 283
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
284
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
285 286

	/* Read Class of Device */
287
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
288 289

	/* Read Local Name */
290
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
291 292

	/* Read Voice Setting */
293
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
294

295 296 297
	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

298 299 300
	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

301 302
	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
303
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
304 305

	/* Connection accept timeout ~20 secs */
306
	param = cpu_to_le16(0x7d00);
307
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
308 309
}

310
static void le_setup(struct hci_request *req)
311
{
312 313
	struct hci_dev *hdev = req->hdev;

314
	/* Read LE Buffer Size */
315
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
316 317

	/* Read LE Local Supported Features */
318
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
319

320 321 322
	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

323 324
	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
325
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
326 327
}

328
static void hci_setup_event_mask(struct hci_request *req)
329
{
330 331
	struct hci_dev *hdev = req->hdev;

332 333 334 335 336 337 338 339 340 341 342 343 344 345
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
346 347 348 349 350 351
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */
370 371 372 373 374

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
375 376
	}

377 378
	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
379 380
		events[4] |= 0x02; /* Inquiry Result with RSSI */

381 382 383 384 385 386 387 388
	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

421
	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
422 423
}

424
static int hci_init2_req(struct hci_request *req, unsigned long opt)
425
{
426 427
	struct hci_dev *hdev = req->hdev;

428 429 430
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

431
	if (lmp_bredr_capable(hdev))
432
		bredr_setup(req);
433
	else
434
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
435 436

	if (lmp_le_capable(hdev))
437
		le_setup(req);
438

439 440 441 442 443 444 445
	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
446
	 */
447 448
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
449
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
450 451

	if (lmp_ssp_capable(hdev)) {
452 453 454 455 456 457 458 459
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

460
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
461
			u8 mode = 0x01;
462

463 464
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
465 466 467 468 469 470
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

471
			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
472 473 474
		}
	}

475 476
	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
477 478 479 480 481 482 483 484 485 486
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}
487 488

	if (lmp_inq_tx_pwr_capable(hdev))
489
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
490 491 492 493 494

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
495 496
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
497 498
	}

499
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
500
		u8 enable = 1;
501 502
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
503
	}
504 505

	return 0;
506 507
}

508
static void hci_setup_link_policy(struct hci_request *req)
509
{
510
	struct hci_dev *hdev = req->hdev;
511 512 513 514 515 516 517 518 519 520 521 522 523
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
524
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
525 526
}

527
static void hci_set_le_support(struct hci_request *req)
528
{
529
	struct hci_dev *hdev = req->hdev;
530 531
	struct hci_cp_write_le_host_supported cp;

532 533 534 535
	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

536 537
	memset(&cp, 0, sizeof(cp));

538
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
539
		cp.le = 0x01;
540
		cp.simul = 0x00;
541 542 543
	}

	if (cp.le != lmp_host_le_capable(hdev))
544 545
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
546 547
}

548 549 550 551 552 553 554 555
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
556
	if (lmp_csb_master_capable(hdev)) {
557 558 559 560 561 562 563 564 565
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
566
	if (lmp_csb_slave_capable(hdev)) {
567 568 569 570 571 572
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

573
	/* Enable Authenticated Payload Timeout Expired event if supported */
574
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
575 576
		events[2] |= 0x80;

577 578 579
	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

580
static int hci_init3_req(struct hci_request *req, unsigned long opt)
581
{
582
	struct hci_dev *hdev = req->hdev;
583
	u8 p;
584

585 586
	hci_setup_event_mask(req);

587 588
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
589 590 591 592 593 594 595
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

596
	if (hdev->commands[5] & 0x10)
597
		hci_setup_link_policy(req);
598

599 600 601 602 603 604 605 606 607 608
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

609 610 611 612
	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
613 614 615

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */
616 617 618 619 620 621 622 623 624

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

625 626 627 628 629 630
		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

631 632 633 634 635 636 637 638
		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the correspondig event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666
		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

667 668 669 670 671 672 673 674 675 676 677 678 679 680
		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

681 682 683
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

684 685 686 687 688
		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

689 690 691 692 693 694 695 696 697 698 699
		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

700 701 702 703 704 705 706 707
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

708
		hci_set_le_support(req);
709
	}
710 711 712 713 714 715 716 717 718

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
719 720

	return 0;
721 722
}

723
static int hci_init4_req(struct hci_request *req, unsigned long opt)
724 725 726
{
	struct hci_dev *hdev = req->hdev;

727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749
	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

750 751 752 753
	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

754 755 756 757
	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

758 759 760 761
	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

762
	/* Check for Synchronization Train support */
763
	if (lmp_sync_train_capable(hdev))
764
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
765 766

	/* Enable Secure Connections if supported and configured */
767
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
768
	    bredr_sc_enabled(hdev)) {
769
		u8 support = 0x01;
770

771 772 773
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
774 775

	return 0;
776 777
}

778 779 780 781
static int __hci_init(struct hci_dev *hdev)
{
	int err;

782
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
783 784 785
	if (err < 0)
		return err;

786 787
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);
788

789
	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
790 791 792
	if (err < 0)
		return err;

793 794
	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
795
	 * first two stages of init.
796 797 798 799
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

800
	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
801 802 803
	if (err < 0)
		return err;

804
	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
805 806 807
	if (err < 0)
		return err;

808 809 810 811 812 813 814 815 816 817 818
	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
819
	 */
820 821
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
822 823
		return 0;

824 825
	hci_debugfs_create_common(hdev);

826
	if (lmp_bredr_capable(hdev))
827
		hci_debugfs_create_bredr(hdev);
828

829
	if (lmp_le_capable(hdev))
830
		hci_debugfs_create_le(hdev);
831

832
	return 0;
833 834
}

835
static int hci_init0_req(struct hci_request *req, unsigned long opt)
836 837 838 839 840 841 842 843 844 845 846 847 848 849 850
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
851 852

	return 0;
853 854 855 856 857 858
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

859 860 861
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

862
	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
863 864 865
	if (err < 0)
		return err;

866 867 868
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

869 870 871
	return 0;
}

872
static int hci_scan_req(struct hci_request *req, unsigned long opt)
L
Linus Torvalds 已提交
873 874 875
{
	__u8 scan = opt;

876
	BT_DBG("%s %x", req->hdev->name, scan);
L
Linus Torvalds 已提交
877 878

	/* Inquiry and Page scans */
879
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
880
	return 0;
L
Linus Torvalds 已提交
881 882
}

883
static int hci_auth_req(struct hci_request *req, unsigned long opt)
L
Linus Torvalds 已提交
884 885 886
{
	__u8 auth = opt;

887
	BT_DBG("%s %x", req->hdev->name, auth);
L
Linus Torvalds 已提交
888 889

	/* Authentication */
890
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
891
	return 0;
L
Linus Torvalds 已提交
892 893
}

894
static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
L
Linus Torvalds 已提交
895 896 897
{
	__u8 encrypt = opt;

898
	BT_DBG("%s %x", req->hdev->name, encrypt);
L
Linus Torvalds 已提交
899

900
	/* Encryption */
901
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
902
	return 0;
L
Linus Torvalds 已提交
903 904
}

905
static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
906 907 908
{
	__le16 policy = cpu_to_le16(opt);

909
	BT_DBG("%s %x", req->hdev->name, policy);
910 911

	/* Default link policy */
912
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
913
	return 0;
914 915
}

916
/* Get HCI device by index.
L
Linus Torvalds 已提交
917 918 919
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
920
	struct hci_dev *hdev = NULL, *d;
L
Linus Torvalds 已提交
921 922 923 924 925 926 927

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
928
	list_for_each_entry(d, &hci_dev_list, list) {
L
Linus Torvalds 已提交
929 930 931 932 933 934 935 936 937 938
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
939

940 941 942 943
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

A
Andre Guedes 已提交
944
	switch (discov->state) {
945
	case DISCOVERY_FINDING:
A
Andre Guedes 已提交
946
	case DISCOVERY_RESOLVING:
947 948
		return true;

A
Andre Guedes 已提交
949 950 951
	default:
		return false;
	}
952 953
}

954 955
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
956 957
	int old_state = hdev->discovery.state;

958 959
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

960
	if (old_state == state)
961 962
		return;

963 964
	hdev->discovery.state = state;

965 966
	switch (state) {
	case DISCOVERY_STOPPED:
967 968
		hci_update_background_scan(hdev);

969
		if (old_state != DISCOVERY_STARTING)
970
			mgmt_discovering(hdev, 0);
971 972 973
		break;
	case DISCOVERY_STARTING:
		break;
974
	case DISCOVERY_FINDING:
975 976
		mgmt_discovering(hdev, 1);
		break;
977 978
	case DISCOVERY_RESOLVING:
		break;
979 980 981 982 983
	case DISCOVERY_STOPPING:
		break;
	}
}

984
void hci_inquiry_cache_flush(struct hci_dev *hdev)
L
Linus Torvalds 已提交
985
{
986
	struct discovery_state *cache = &hdev->discovery;
987
	struct inquiry_entry *p, *n;
L
Linus Torvalds 已提交
988

989 990
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
991
		kfree(p);
L
Linus Torvalds 已提交
992
	}
993 994 995

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
L
Linus Torvalds 已提交
996 997
}

998 999
/* Find an inquiry cache entry by BD address; NULL if not cached. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1015
						       bdaddr_t *bdaddr)
1016
{
1017
	struct discovery_state *cache = &hdev->discovery;
1018 1019
	struct inquiry_entry *e;

1020
	BT_DBG("cache %p, %pMR", cache, bdaddr);
1021 1022

	list_for_each_entry(e, &cache->unknown, list) {
L
Linus Torvalds 已提交
1023
		if (!bacmp(&e->data.bdaddr, bdaddr))
1024 1025 1026 1027
			return e;
	}

	return NULL;
L
Linus Torvalds 已提交
1028 1029
}

1030
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1031 1032
						       bdaddr_t *bdaddr,
						       int state)
1033 1034 1035 1036
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

1037
	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

1049
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1050
				      struct inquiry_entry *ie)
1051 1052 1053 1054 1055 1056 1057 1058 1059
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
1060
		    abs(p->data.rssi) >= abs(ie->data.rssi))
1061 1062 1063 1064 1065 1066 1067
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

1068 1069
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
L
Linus Torvalds 已提交
1070
{
1071
	struct discovery_state *cache = &hdev->discovery;
A
Andrei Emeltchenko 已提交
1072
	struct inquiry_entry *ie;
1073
	u32 flags = 0;
L
Linus Torvalds 已提交
1074

1075
	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
L
Linus Torvalds 已提交
1076

1077
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1078

1079 1080
	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1081

A
Andrei Emeltchenko 已提交
1082
	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1083
	if (ie) {
1084 1085
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1086

1087
		if (ie->name_state == NAME_NEEDED &&
1088
		    data->rssi != ie->data.rssi) {
1089 1090 1091 1092
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

1093
		goto update;
1094
	}
1095 1096

	/* Entry not in the cache. Add new one. */
1097
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1098 1099 1100 1101
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}
1102 1103 1104 1105 1106 1107 1108 1109 1110

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}
A
Andrei Emeltchenko 已提交
1111

1112 1113
update:
	if (name_known && ie->name_state != NAME_KNOWN &&
1114
	    ie->name_state != NAME_PENDING) {
1115 1116
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
L
Linus Torvalds 已提交
1117 1118
	}

A
Andrei Emeltchenko 已提交
1119 1120
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
L
Linus Torvalds 已提交
1121
	cache->timestamp = jiffies;
1122 1123

	if (ie->name_state == NAME_NOT_KNOWN)
1124
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1125

1126 1127
done:
	return flags;
L
Linus Torvalds 已提交
1128 1129 1130 1131
}

/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep; caller copies the buffer to user space afterwards.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

1158
static int hci_inq_req(struct hci_request *req, unsigned long opt)
L
Linus Torvalds 已提交
1159 1160
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1161
	struct hci_dev *hdev = req->hdev;
L
Linus Torvalds 已提交
1162 1163 1164 1165 1166
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
1167
		return 0;
L
Linus Torvalds 已提交
1168 1169 1170 1171 1172

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
1173
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1174 1175

	return 0;
L
Linus Torvalds 已提交
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
}

/* HCIINQUIRY ioctl handler: run (or reuse a fresh cache of) an inquiry
 * and copy the results back to user space. Returns 0 or a negative
 * errno (-EFAULT, -ENODEV, -EBUSY, -EOPNOTSUPP, -ENOMEM, -EINTR).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Only start a new inquiry when the cache is stale, empty or a
	 * flush was explicitly requested.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

1273
static int hci_dev_do_open(struct hci_dev *hdev)
L
Linus Torvalds 已提交
1274 1275 1276 1277 1278
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

1279
	hci_req_sync_lock(hdev);
L
Linus Torvalds 已提交
1280

1281
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1282 1283 1284 1285
		ret = -ENODEV;
		goto done;
	}

1286 1287
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1288 1289 1290
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
1291
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1292 1293 1294 1295 1296 1297 1298 1299 1300
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
1301 1302 1303 1304
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
1305 1306 1307
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
1308
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1309
		    hdev->dev_type == HCI_BREDR &&
1310 1311 1312 1313 1314
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
1315 1316
	}

L
Linus Torvalds 已提交
1317 1318 1319 1320 1321 1322 1323 1324 1325 1326
	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

1327
	set_bit(HCI_RUNNING, &hdev->flags);
1328
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1329

1330 1331 1332
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

1333
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1334 1335
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

1336 1337
		if (hdev->setup)
			ret = hdev->setup(hdev);
1338

1339 1340 1341 1342 1343 1344
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
1345 1346
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1347
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1348

1349 1350 1351 1352 1353 1354 1355 1356
		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
1357
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1358
			ret = __hci_unconf_init(hdev);
1359 1360
	}

1361
	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1362 1363 1364 1365 1366 1367 1368
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
1369 1370 1371 1372 1373
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

1374
	if (!ret) {
1375
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1376
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1377
			ret = __hci_init(hdev);
1378 1379 1380
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
L
Linus Torvalds 已提交
1381 1382
	}

1383 1384 1385 1386 1387 1388 1389 1390
	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

1391 1392
	clear_bit(HCI_INIT, &hdev->flags);

L
Linus Torvalds 已提交
1393 1394
	if (!ret) {
		hci_dev_hold(hdev);
1395
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
L
Linus Torvalds 已提交
1396
		set_bit(HCI_UP, &hdev->flags);
1397
		hci_sock_dev_event(hdev, HCI_DEV_UP);
1398 1399 1400 1401
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1402
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1403
		    hdev->dev_type == HCI_BREDR) {
1404 1405
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
1406
		}
1407
	} else {
L
Linus Torvalds 已提交
1408
		/* Init failed, cleanup */
1409
		flush_work(&hdev->tx_work);
1410
		flush_work(&hdev->cmd_work);
1411
		flush_work(&hdev->rx_work);
L
Linus Torvalds 已提交
1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

1424
		clear_bit(HCI_RUNNING, &hdev->flags);
1425
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1426

L
Linus Torvalds 已提交
1427
		hdev->close(hdev);
1428
		hdev->flags &= BIT(HCI_RAW);
L
Linus Torvalds 已提交
1429 1430 1431
	}

done:
1432
	hci_req_sync_unlock(hdev);
L
Linus Torvalds 已提交
1433 1434 1435
	return ret;
}

1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

1447
	/* Devices that are marked as unconfigured can only be powered
1448 1449 1450 1451 1452 1453 1454 1455
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
1456 1457
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1458 1459 1460 1461
		err = -EOPNOTSUPP;
		goto done;
	}

1462 1463 1464 1465 1466
	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
1467
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1468 1469
		cancel_delayed_work(&hdev->power_off);

1470 1471 1472 1473
	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
1474 1475
	flush_workqueue(hdev->req_workqueue);

1476
	/* For controllers not using the management interface and that
1477
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1478 1479 1480 1481
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
1482 1483
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
1484
		hci_dev_set_flag(hdev, HCI_BONDABLE);
1485

1486 1487
	err = hci_dev_do_open(hdev);

1488
done:
1489 1490 1491 1492
	hci_dev_put(hdev);
	return err;
}

1493 1494 1495 1496 1497
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

1498 1499 1500
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
1501
			hci_conn_put(p->conn);
1502 1503
			p->conn = NULL;
		}
1504
		list_del_init(&p->action);
1505
	}
1506 1507 1508 1509

	BT_DBG("All LE pending actions cleared");
}

1510
int hci_dev_do_close(struct hci_dev *hdev)
L
Linus Torvalds 已提交
1511
{
1512 1513
	bool auto_off;

L
Linus Torvalds 已提交
1514 1515
	BT_DBG("%s %p", hdev->name, hdev);

1516
	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1517
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1518
	    test_bit(HCI_UP, &hdev->flags)) {
1519 1520 1521 1522 1523
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

1524 1525
	cancel_delayed_work(&hdev->power_off);

1526
	hci_request_cancel_all(hdev);
1527
	hci_req_sync_lock(hdev);
L
Linus Torvalds 已提交
1528 1529

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1530
		cancel_delayed_work_sync(&hdev->cmd_timer);
1531
		hci_req_sync_unlock(hdev);
L
Linus Torvalds 已提交
1532 1533 1534
		return 0;
	}

1535 1536
	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
1537
	flush_work(&hdev->rx_work);
L
Linus Torvalds 已提交
1538

1539 1540
	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
1541 1542
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1543 1544
	}

1545
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1546 1547
		cancel_delayed_work(&hdev->service_cache);

1548
	if (hci_dev_test_flag(hdev, HCI_MGMT))
1549
		cancel_delayed_work_sync(&hdev->rpa_expired);
A
Andre Guedes 已提交
1550

1551 1552 1553 1554 1555
	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

1556
	hci_dev_lock(hdev);
1557

1558 1559
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

1560 1561
	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

1562 1563 1564
	if (!auto_off && hdev->dev_type == HCI_BREDR &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);
1565

1566
	hci_inquiry_cache_flush(hdev);
1567
	hci_pend_le_actions_clear(hdev);
1568
	hci_conn_hash_flush(hdev);
1569
	hci_dev_unlock(hdev);
L
Linus Torvalds 已提交
1570

1571 1572
	smp_unregister(hdev);

1573
	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
L
Linus Torvalds 已提交
1574 1575 1576 1577 1578 1579 1580

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
1581 1582
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
L
Linus Torvalds 已提交
1583
		set_bit(HCI_INIT, &hdev->flags);
1584
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
L
Linus Torvalds 已提交
1585 1586 1587
		clear_bit(HCI_INIT, &hdev->flags);
	}

1588 1589
	/* flush cmd  work */
	flush_work(&hdev->cmd_work);
L
Linus Torvalds 已提交
1590 1591 1592 1593 1594 1595 1596 1597

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
1598
		cancel_delayed_work_sync(&hdev->cmd_timer);
L
Linus Torvalds 已提交
1599 1600 1601 1602
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

1603
	clear_bit(HCI_RUNNING, &hdev->flags);
1604
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1605

L
Linus Torvalds 已提交
1606 1607 1608 1609
	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

1610
	/* Clear flags */
1611
	hdev->flags &= BIT(HCI_RAW);
1612
	hci_dev_clear_volatile_flags(hdev);
1613

1614
	/* Controller radio is available but is currently powered down */
1615
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1616

1617
	memset(hdev->eir, 0, sizeof(hdev->eir));
1618
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1619
	bacpy(&hdev->random_addr, BDADDR_ANY);
1620

1621
	hci_req_sync_unlock(hdev);
L
Linus Torvalds 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631

	hci_dev_put(hdev);
	return 0;
}

/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel
 * pending auto-off work and power the device down.
 * Returns 0 or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

1651
static int hci_dev_do_reset(struct hci_dev *hdev)
L
Linus Torvalds 已提交
1652
{
1653
	int ret;
L
Linus Torvalds 已提交
1654

1655
	BT_DBG("%s %p", hdev->name, hdev);
L
Linus Torvalds 已提交
1656

1657
	hci_req_sync_lock(hdev);
L
Linus Torvalds 已提交
1658 1659 1660 1661 1662

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

1663 1664 1665 1666 1667
	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

1668
	hci_dev_lock(hdev);
1669
	hci_inquiry_cache_flush(hdev);
L
Linus Torvalds 已提交
1670
	hci_conn_hash_flush(hdev);
1671
	hci_dev_unlock(hdev);
L
Linus Torvalds 已提交
1672 1673 1674 1675

	if (hdev->flush)
		hdev->flush(hdev);

1676
	atomic_set(&hdev->cmd_cnt, 1);
1677
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
L
Linus Torvalds 已提交
1678

1679
	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
L
Linus Torvalds 已提交
1680

1681
	hci_req_sync_unlock(hdev);
L
Linus Torvalds 已提交
1682 1683 1684
	return ret;
}

1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

1699
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1700 1701 1702 1703
		err = -EBUSY;
		goto done;
	}

1704
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

L
Linus Torvalds 已提交
1716 1717 1718 1719 1720
/* HCIDEVRESTAT ioctl entry point: zero the device statistics counters.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

1742 1743
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
1744
	bool conn_changed, discov_changed;
1745 1746 1747 1748

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
1749 1750
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
1751
	else
1752 1753
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
1754

1755
	if ((scan & SCAN_INQUIRY)) {
1756 1757
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
1758
	} else {
1759
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1760 1761
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
1762 1763
	}

1764
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1765 1766
		return;

1767 1768
	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
1769
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1770

1771
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1772
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1773

1774
		mgmt_new_settings(hdev);
1775
	}
1776 1777
}

L
Linus Torvalds 已提交
1778 1779 1780 1781 1782 1783 1784 1785 1786
/* Handler for the legacy HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). Returns 0 or a negative
 * errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

/* HCIGETDEVLIST ioctl handler: copy the id/flags of up to dev_num
 * registered devices to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

/* HCIGETDEVINFO ioctl handler: fill a hci_dev_info structure for the
 * requested device and copy it to user space. Returns 0 or a negative
 * errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

1985 1986 1987 1988 1989 1990
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

1991
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1992 1993
		return -EBUSY;

1994
	if (blocked) {
1995
		hci_dev_set_flag(hdev, HCI_RFKILLED);
1996 1997
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
1998
			hci_dev_do_close(hdev);
1999
	} else {
2000
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2001
	}
2002 2003 2004 2005 2006 2007 2008 2009

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

2010 2011 2012
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2013
	int err;
2014 2015 2016

	BT_DBG("%s", hdev->name);

2017 2018 2019 2020 2021 2022 2023 2024 2025 2026
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

2027
	err = hci_dev_do_open(hdev);
2028
	if (err < 0) {
2029
		hci_dev_lock(hdev);
2030
		mgmt_set_powered_failed(hdev, err);
2031
		hci_dev_unlock(hdev);
2032
		return;
2033
	}
2034

2035 2036 2037 2038
	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
2039 2040
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2041 2042 2043
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2044
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2045
		hci_dev_do_close(hdev);
2046
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2047 2048
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
2049
	}
2050

2051
	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2052 2053 2054
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
2055
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2056
			set_bit(HCI_RAW, &hdev->flags);
2057 2058 2059 2060 2061 2062 2063 2064 2065

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigued Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
2066
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2067 2068 2069
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
2070
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2071 2072
			clear_bit(HCI_RAW, &hdev->flags);

2073 2074 2075 2076
		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
2077
		mgmt_index_added(hdev);
2078
	}
2079 2080 2081 2082
}

/* Deferred power-off handler, scheduled on hdev->power_off (see the
 * HCI_AUTO_OFF timeout in hci_power_on and INIT_DELAYED_WORK in
 * hci_alloc_dev). Simply brings the controller down.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108
/* Work handler run after the controller signalled a hardware error:
 * let the driver react first (if it provides a hw_error callback),
 * then recover by cycling the device down and back up.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	/* Prefer the driver-specific handler; fall back to just logging
	 * the error code reported by the controller.
	 */
	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	/* A non-zero close result means the device cannot be reopened;
	 * skip the restart in that case.
	 */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

2109
void hci_uuids_clear(struct hci_dev *hdev)
2110
{
2111
	struct bt_uuid *uuid, *tmp;
2112

2113 2114
	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
2115 2116 2117 2118
		kfree(uuid);
	}
}

2119
/* Drop all stored BR/EDR link keys. Writer side of an RCU-protected
 * list: entries are unlinked with list_del_rcu() and freed only after
 * a grace period via kfree_rcu(), so concurrent RCU readers (e.g.
 * hci_find_link_key) stay safe.
 * NOTE(review): called under hdev->lock in hci_unregister_dev —
 * presumably all callers hold it; confirm before relying on this.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

2129
/* Drop all stored SMP Long Term Keys. Same RCU writer-side pattern as
 * hci_link_keys_clear: unlink with list_del_rcu(), defer the free with
 * kfree_rcu() so lockless readers are not disturbed.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

2139 2140
/* Drop all stored Identity Resolving Keys. RCU writer-side removal,
 * mirroring hci_link_keys_clear / hci_smp_ltks_clear.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

2149 2150
/* Look up the BR/EDR link key for @bdaddr.
 *
 * Walks the RCU-protected key list under rcu_read_lock(). Returns the
 * matching entry or NULL. The returned pointer is only guaranteed valid
 * while the entry stays on the list (removal is deferred by kfree_rcu).
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			/* Unlock before returning the match */
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

2165
/* Decide whether a newly created link key should be stored persistently
 * (i.e. survive the connection) based on the key type and the local and
 * remote authentication requirements. @old_key_type is 0xff when no
 * previous key existed. The checks are ordered from strongest signals
 * to weakest; the first match wins.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

2205
/* Map an SMP LTK type to the HCI role it was created for: an SMP_LTK
 * belongs to the master role, anything else to the slave role.
 */
static u8 ltk_role(u8 type)
{
	return type == SMP_LTK ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
}

2213 2214
/* Look up a Long Term Key by address, address type and role.
 *
 * A Secure Connections LTK (smp_ltk_is_sc) is symmetric and matches
 * either role; a legacy LTK only matches the role it was generated for.
 * RCU read-side lookup; returns NULL when no suitable key exists.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		/* SC keys work for both roles; legacy keys are role bound */
		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

2233 2234 2235 2236
/* Resolve a Resolvable Private Address to its IRK entry.
 *
 * Two passes under one RCU read-side section: first a cheap comparison
 * against each entry's cached RPA, then the cryptographic check via
 * smp_irk_matches(). On a crypto match the RPA is cached in the entry
 * so the next lookup hits the fast path. Returns NULL if no IRK
 * resolves the address.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/* Look up an IRK by its Identity Address (not by RPA).
 *
 * Rejects random addresses that are not static random (top two bits of
 * the most significant byte must be 11), since only public or static
 * random addresses are valid identity addresses. RCU read-side lookup;
 * returns NULL when no entry matches.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

2279
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * Reuses an existing entry when one is found, otherwise allocates a new
 * one and adds it to the RCU-protected list. If @persistent is non-NULL
 * it is set to whether the key should be stored permanently (see
 * hci_persistent_key). Returns the entry, or NULL on allocation
 * failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

2326
/* Store (or update in place) an SMP Long Term Key.
 *
 * The lookup key is (bdaddr, addr_type, role-derived-from-type); an
 * existing entry is overwritten, otherwise a new one is added to the
 * RCU-protected list. Returns the entry, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

2355 2356
/* Store (or refresh) the Identity Resolving Key for an identity
 * address. The IRK value and the currently known RPA are (re)written
 * even when the entry already exists. Returns the entry, or NULL on
 * allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Always refresh key material and cached RPA */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

2378 2379 2380 2381 2382 2383 2384 2385
/* Delete the stored BR/EDR link key for @bdaddr.
 *
 * Returns 0 on success or -ENOENT when no key is stored. Removal is
 * RCU-safe: list_del_rcu() plus deferred kfree_rcu().
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

2394
/* Delete every Long Term Key stored for (bdaddr, bdaddr_type).
 *
 * Multiple entries may match (e.g. master and slave role keys), so all
 * of them are removed. Returns 0 if at least one key was removed,
 * otherwise -ENOENT.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

2413 2414
/* Delete the Identity Resolving Key(s) stored for (bdaddr, addr_type).
 * Silently does nothing when no entry matches. RCU writer-side removal.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

2428 2429 2430
/* Report whether any pairing key is stored for the given address.
 *
 * BR/EDR addresses are answered from the link-key list. LE addresses
 * are first resolved through the IRK table (so an RPA-derived identity
 * is used for the search) and then matched against the LTK list.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_irk *irk;
	struct smp_ltk *ltk;
	u8 addr_type;

	if (type == BDADDR_BREDR)
		return hci_find_link_key(hdev, bdaddr) != NULL;

	/* Convert to HCI addr type which struct smp_ltk uses */
	addr_type = (type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC
					       : ADDR_LE_DEV_RANDOM;

	/* If the address resolves via an IRK, search by the identity
	 * address instead of the one we were given.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list) {
		if (ltk->bdaddr_type == addr_type &&
		    !bacmp(bdaddr, &ltk->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

2464
/* HCI command timer function.
 *
 * Fires when the controller failed to answer a command within the
 * timeout. Logs the stuck opcode (when the sent command is still
 * around), then resets the command credit to 1 and kicks the command
 * work so the queue can make progress again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow the next queued command to be sent */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

2483
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2484
					  bdaddr_t *bdaddr, u8 bdaddr_type)
2485 2486 2487
{
	struct oob_data *data;

2488 2489 2490 2491 2492 2493 2494
	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}
2495 2496 2497 2498

	return NULL;
}

2499 2500
/* Delete the stored remote OOB data for (bdaddr, bdaddr_type).
 * Returns 0 on success or -ENOENT when nothing is stored.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

2516
void hci_remote_oob_data_clear(struct hci_dev *hdev)
2517 2518 2519 2520 2521 2522 2523 2524 2525
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

2526
/* Store (or update) the remote Out-Of-Band pairing data for an address.
 *
 * Either key pair may be NULL; absent values are zeroed in the entry.
 * data->present encodes which values are valid: bit 0 for the P-192
 * pair (hash192/rand192), bit 1 for the P-256 pair (hash256/rand256).
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;	/* both P-192 and P-256 */
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;	/* P-256 only */
		else
			data->present = 0x00;	/* nothing valid */
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;	/* P-192 only */
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585
/* This function requires the caller holds hdev->lock.
 * Return the advertising instance with the given instance number, or
 * NULL if it does not exist.
 */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	list_for_each_entry(adv, &hdev->adv_instances, list) {
		if (adv->instance == instance)
			return adv;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
2586 2587
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
						 struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock.
 *
 * Remove one advertising instance. If it is the currently active one,
 * its expiry timer is cancelled first and the current instance is
 * reset. Returns 0 on success or -ENOENT when the instance is unknown.
 */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		/* Stop the pending expiry before dropping the instance */
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock.
 *
 * Remove every advertising instance, cancelling any pending expiry
 * timer first, and reset the instance bookkeeping to its empty state.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

/* This function requires the caller holds hdev->lock.
 *
 * Add a new advertising instance or overwrite an existing one with the
 * same instance number. @timeout is the total lifetime, @duration the
 * per-rotation air time (0 selects HCI_DEFAULT_ADV_DURATION). Returns
 * 0 on success, -EOVERFLOW when the instance number is out of range or
 * the instance table is full, or -ENOMEM on allocation failure.
 */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		/* Reuse the entry; wipe stale payloads before refilling */
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}

2701
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2702
					 bdaddr_t *bdaddr, u8 type)
2703
{
2704
	struct bdaddr_list *b;
2705

2706
	list_for_each_entry(b, bdaddr_list, list) {
2707
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2708
			return b;
2709
	}
2710 2711 2712 2713

	return NULL;
}

2714
void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2715
{
G
Geliang Tang 已提交
2716
	struct bdaddr_list *b, *n;
2717

G
Geliang Tang 已提交
2718 2719
	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
2720 2721 2722 2723
		kfree(b);
	}
}

2724
/* Add (bdaddr, type) to a generic bdaddr list.
 *
 * Returns 0 on success, -EBADF for the wildcard BDADDR_ANY (never a
 * valid list member), -EEXIST when the entry is already present, or
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

2746
/* Remove (bdaddr, type) from a generic bdaddr list.
 *
 * The wildcard BDADDR_ANY clears the whole list. Returns 0 on success
 * or -ENOENT when the entry is not on the list.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780
/* This function requires the caller holds hdev->lock.
 * Return the LE connection parameters stored for (addr, addr_type),
 * or NULL when none exist.
 */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->addr_type == addr_type && !bacmp(&p->addr, addr))
			return p;
	}

	return NULL;
}

2781
/* This function requires the caller holds hdev->lock */
2782 2783
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
2784
{
2785
	struct hci_conn_params *param;
2786

2787
	list_for_each_entry(param, list, action) {
2788 2789 2790
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
2791 2792 2793
	}

	return NULL;
2794 2795
}

2796
/* This function requires the caller holds hdev->lock.
 *
 * Get-or-create the LE connection parameters for (addr, addr_type).
 * A new entry is initialized from the controller-wide defaults with
 * auto-connect disabled. Returns the entry, or NULL on allocation
 * failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any pend_le_* list yet */
	INIT_LIST_HEAD(&params->action);

	/* Seed from the per-controller defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

2829
/* Release one hci_conn_params entry: drop the reference it may hold on
 * an active connection, unlink it from both the params list and any
 * pending-action list, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	/* Safe even when not queued: action was INIT_LIST_HEAD'ed */
	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock.
 *
 * Delete the LE connection parameters for (addr, addr_type) — a no-op
 * when none exist — and refresh background scanning to reflect the
 * change.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock.
 *
 * Drop all LE connection parameters whose auto-connect policy is
 * disabled, except entries with an explicit connect attempt in flight,
 * which are downgraded to one-shot (HCI_AUTO_CONN_EXPLICIT) instead of
 * being removed.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock.
 *
 * Drop every LE connection parameter entry, releasing any connection
 * references and pending-action links via hci_conn_params_free().
 */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

2892 2893 2894 2895 2896 2897 2898 2899
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead
 * (HCI_FORCE_STATIC_ADDR).
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

2920 2921 2922 2923 2924
/* Alloc HCI device.
 *
 * Allocate and initialize a struct hci_dev with sane defaults: packet
 * types, timing parameters, all key/param lists, the work items and
 * queues used by the core, plus request and sysfs/discovery setup.
 * Returns the device (to be registered with hci_register_dev) or NULL
 * on allocation failure. Freed via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults below are controller-independent initial values;
	 * they may be overridden later (debugfs/mgmt).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device (counterpart of hci_alloc_dev). */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

L
Linus Torvalds 已提交
3016 3017 3018
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, allocates an index (hciN),
 * creates the two per-device workqueues, adds the sysfs device and the
 * rfkill switch, publishes the device on hci_dev_list and schedules the
 * initial power-on. Returns the assigned index or a negative errno; on
 * failure all partially acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best effort: on failure the device is
	 * simply registered without an rfkill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device.
 *
 * Reverses hci_register_dev: takes the device off the global list,
 * closes it, informs mgmt (unless still in setup/config), removes the
 * rfkill switch, sysfs entry, debugfs tree and workqueues, clears all
 * stored state under hdev->lock, and finally releases the reference
 * and the index. Teardown order matters: the device must be closed and
 * both workqueues drained before state lists are cleared.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device: only notifies HCI sockets; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
3196
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
L
Linus Torvalds 已提交
3197 3198 3199 3200
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

3201 3202 3203 3204 3205 3206 3207 3208 3209 3210
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

3211
	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3212 3213 3214 3215 3216 3217 3218
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

3219
/* Receive frame from HCI drivers */
3220
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3221 3222
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3223
		      && !test_bit(HCI_INIT, &hdev->flags))) {
3224 3225 3226 3227
		kfree_skb(skb);
		return -ENXIO;
	}

3228 3229 3230
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3231 3232 3233 3234
		kfree_skb(skb);
		return -EINVAL;
	}

3235
	/* Incoming skb */
3236 3237 3238 3239 3240 3241
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
3242
	queue_work(hdev->workqueue, &hdev->rx_work);
3243

3244 3245 3246 3247
	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

3248 3249 3250
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
3251
	/* Mark as diagnostic packet */
3252
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3253

3254 3255 3256
	/* Time stamp */
	__net_timestamp(skb);

3257 3258
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);
3259 3260 3261 3262 3263

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

L
Linus Torvalds 已提交
3264 3265 3266 3267 3268 3269
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

3270
	mutex_lock(&hci_cb_list_lock);
3271
	list_add_tail(&cb->list, &hci_cb_list);
3272
	mutex_unlock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
3273 3274 3275 3276 3277 3278 3279 3280 3281

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

3282
	mutex_lock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
3283
	list_del(&cb->list);
3284
	mutex_unlock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
3285 3286 3287 3288 3289

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

3290
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
3291
{
3292 3293
	int err;

3294 3295
	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);
L
Linus Torvalds 已提交
3296

3297 3298
	/* Time stamp */
	__net_timestamp(skb);
L
Linus Torvalds 已提交
3299

3300 3301 3302 3303 3304
	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
3305
		hci_send_to_sock(hdev, skb);
L
Linus Torvalds 已提交
3306 3307 3308 3309 3310
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

3311 3312 3313 3314 3315
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

3316 3317 3318 3319 3320
	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
L
Linus Torvalds 已提交
3321 3322
}

3323
/* Send HCI command */
3324 3325
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

S
Stephen Hemminger 已提交
3337
	/* Stand-alone HCI commands must be flagged as
3338 3339
	 * single-command requests.
	 */
3340
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3341

L
Linus Torvalds 已提交
3342
	skb_queue_tail(&hdev->cmd_q, skb);
3343
	queue_work(hdev->workqueue, &hdev->cmd_work);
L
Linus Torvalds 已提交
3344 3345 3346 3347 3348

	return 0;
}

/* Get data from the previously sent command */
3349
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
L
Linus Torvalds 已提交
3350 3351 3352 3353 3354 3355 3356 3357
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

3358
	if (hdr->opcode != cpu_to_le16(opcode))
L
Linus Torvalds 已提交
3359 3360
		return NULL;

3361
	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
L
Linus Torvalds 已提交
3362 3363 3364 3365

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376
/* Send HCI command and wait for command complete event.
 * Returns the event skb on success or an ERR_PTR (-ENETDOWN if the
 * device is not up).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Serialize against other synchronous requests */
	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

L
Linus Torvalds 已提交
3385 3386 3387 3388 3389 3390
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before pushing the header */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* handle field carries both the connection handle and PB/BC flags */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

3398
/* Add ACL headers and queue an (optionally fragmented) skb on the
 * channel data queue. Fragments in skb's frag_list are queued
 * atomically with ACL_CONT flags.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP controllers use the channel handle in the ACL header,
	 * BR/EDR controllers the connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

/* Queue ACL data on the channel and kick the TX work */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
3472
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
L
Linus Torvalds 已提交
3473 3474 3475 3476 3477 3478
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

3479
	hdr.handle = cpu_to_le16(conn->handle);
L
Linus Torvalds 已提交
3480 3481
	hdr.dlen   = skb->len;

3482 3483
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
3484
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
L
Linus Torvalds 已提交
3485

3486
	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3487

L
Linus Torvalds 已提交
3488
	skb_queue_tail(&conn->data_q, skb);
3489
	queue_work(hdev->workqueue, &hdev->tx_work);
L
Linus Torvalds 已提交
3490 3491 3492 3493 3494
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
3495 3496
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
L
Linus Torvalds 已提交
3497 3498
{
	struct hci_conn_hash *h = &hdev->conn_hash;
3499
	struct hci_conn *conn = NULL, *c;
3500
	unsigned int num = 0, min = ~0;
L
Linus Torvalds 已提交
3501

3502
	/* We don't have to lock device here. Connections are always
L
Linus Torvalds 已提交
3503
	 * added and removed with TX task disabled. */
3504 3505 3506 3507

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
3508
		if (c->type != type || skb_queue_empty(&c->data_q))
L
Linus Torvalds 已提交
3509
			continue;
3510 3511 3512 3513

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

L
Linus Torvalds 已提交
3514 3515 3516 3517 3518 3519
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
3520 3521 3522

		if (hci_conn_num(hdev, type) == num)
			break;
L
Linus Torvalds 已提交
3523 3524
	}

3525 3526
	rcu_read_unlock();

L
Linus Torvalds 已提交
3527
	if (conn) {
3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
L
Linus Torvalds 已提交
3547 3548 3549 3550 3551 3552 3553 3554
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

3555
/* TX timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

3576 3577
/* Channel scheduler: among all channels of the given link type with
 * queued data, pick one carrying the highest skb priority, breaking
 * ties by the connection with the fewest packets in flight. Computes a
 * fair-share quote like hci_low_sent().
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness scan */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

3658 3659 3660 3661 3662 3663 3664 3665
/* Anti-starvation pass: promote the head skb of every channel that got
 * no service in the last scheduling round to HCI_PRIO_MAX - 1, and
 * reset the per-round sent counters of the channels that were served.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

3708 3709 3710 3711 3712 3713
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

3714
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3715
{
3716
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
L
Linus Torvalds 已提交
3717 3718
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
3719
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3720
				       HCI_ACL_TX_TIMEOUT))
3721
			hci_link_tx_to(hdev, ACL_LINK);
L
Linus Torvalds 已提交
3722
	}
3723
}
L
Linus Torvalds 已提交
3724

3725
static void hci_sched_acl_pkt(struct hci_dev *hdev)
3726 3727 3728 3729 3730 3731 3732
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);
3733

3734
	while (hdev->acl_cnt &&
3735
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3736 3737
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3738
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3739
			       skb->len, skb->priority);
3740

3741 3742 3743 3744 3745 3746
			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

3747
			hci_conn_enter_active_mode(chan->conn,
3748
						   bt_cb(skb)->force_active);
3749

3750
			hci_send_frame(hdev, skb);
L
Linus Torvalds 已提交
3751 3752 3753
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
3754 3755
			chan->sent++;
			chan->conn->sent++;
L
Linus Torvalds 已提交
3756 3757
		}
	}
3758 3759 3760

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
L
Linus Torvalds 已提交
3761 3762
}

3763
static void hci_sched_acl_blk(struct hci_dev *hdev)
3764
{
3765
	unsigned int cnt = hdev->block_cnt;
3766 3767 3768
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
3769
	u8 type;
3770

3771
	__check_timeout(hdev, cnt);
3772

3773 3774 3775 3776 3777 3778 3779
	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

3780
	while (hdev->block_cnt > 0 &&
3781
	       (chan = hci_chan_sent(hdev, type, &quote))) {
3782 3783 3784 3785 3786
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3787
			       skb->len, skb->priority);
3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
3800
						   bt_cb(skb)->force_active);
3801

3802
			hci_send_frame(hdev, skb);
3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
3814
		hci_prio_recalculate(hdev, type);
3815 3816
}

3817
static void hci_sched_acl(struct hci_dev *hdev)
3818 3819 3820
{
	BT_DBG("%s", hdev->name);

3821 3822 3823 3824 3825 3826
	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

L
Linus Torvalds 已提交
3840
/* Schedule SCO */
3841
static void hci_sched_sco(struct hci_dev *hdev)
L
Linus Torvalds 已提交
3842 3843 3844 3845 3846 3847 3848
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

3849 3850 3851
	if (!hci_conn_num(hdev, SCO_LINK))
		return;

L
Linus Torvalds 已提交
3852 3853 3854
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
3855
			hci_send_frame(hdev, skb);
L
Linus Torvalds 已提交
3856 3857 3858 3859 3860 3861 3862 3863

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

3864
static void hci_sched_esco(struct hci_dev *hdev)
3865 3866 3867 3868 3869 3870 3871
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

3872 3873 3874
	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

3875 3876
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
3877 3878
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
3879
			hci_send_frame(hdev, skb);
3880 3881 3882 3883 3884 3885 3886 3887

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

3888
static void hci_sched_le(struct hci_dev *hdev)
3889
{
3890
	struct hci_chan *chan;
3891
	struct sk_buff *skb;
3892
	int quote, cnt, tmp;
3893 3894 3895

	BT_DBG("%s", hdev->name);

3896 3897 3898
	if (!hci_conn_num(hdev, LE_LINK))
		return;

3899
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3900 3901
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
3902
		if (!hdev->le_cnt && hdev->le_pkts &&
3903
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
3904
			hci_link_tx_to(hdev, LE_LINK);
3905 3906 3907
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3908
	tmp = cnt;
3909
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3910 3911
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3912
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3913
			       skb->len, skb->priority);
3914

3915 3916 3917 3918 3919 3920
			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

3921
			hci_send_frame(hdev, skb);
3922 3923 3924
			hdev->le_last_tx = jiffies;

			cnt--;
3925 3926
			chan->sent++;
			chan->conn->sent++;
3927 3928
		}
	}
3929

3930 3931 3932 3933
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
3934 3935 3936

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
3937 3938
}

3939
static void hci_tx_work(struct work_struct *work)
L
Linus Torvalds 已提交
3940
{
3941
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
L
Linus Torvalds 已提交
3942 3943
	struct sk_buff *skb;

3944
	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3945
	       hdev->sco_cnt, hdev->le_cnt);
L
Linus Torvalds 已提交
3946

3947
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3948 3949 3950 3951 3952 3953
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}
3954

L
Linus Torvalds 已提交
3955 3956
	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
3957
		hci_send_frame(hdev, skb);
L
Linus Torvalds 已提交
3958 3959
}

L
Lucas De Marchi 已提交
3960
/* ----- HCI RX task (incoming data processing) ----- */
L
Linus Torvalds 已提交
3961 3962

/* ACL data packet */
3963
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

3975
	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3976
	       handle, flags);
L
Linus Torvalds 已提交
3977 3978 3979 3980 3981 3982

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);
3983

L
Linus Torvalds 已提交
3984
	if (conn) {
3985
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3986

L
Linus Torvalds 已提交
3987
		/* Send to upper protocol */
3988 3989
		l2cap_recv_acldata(conn, skb, flags);
		return;
L
Linus Torvalds 已提交
3990
	} else {
3991
		BT_ERR("%s ACL packet for unknown connection handle %d",
3992
		       hdev->name, handle);
L
Linus Torvalds 已提交
3993 3994 3995 3996 3997 3998
	}

	kfree_skb(skb);
}

/* SCO data packet */
3999
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
L
Linus Torvalds 已提交
4000 4001 4002 4003 4004 4005 4006 4007 4008
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

4009
	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
L
Linus Torvalds 已提交
4010 4011 4012 4013 4014 4015 4016 4017 4018

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
4019 4020
		sco_recv_scodata(conn, skb);
		return;
L
Linus Torvalds 已提交
4021
	} else {
4022
		BT_ERR("%s SCO packet for unknown connection handle %d",
4023
		       hdev->name, handle);
L
Linus Torvalds 已提交
4024 4025 4026 4027 4028
	}

	kfree_skb(skb);
}

4029 4030 4031 4032 4033 4034 4035 4036
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

4037
	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4038 4039
}

4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061
/* Re-queue the last sent command (except HCI_Reset) at the head of the
 * command queue; used to recover from spontaneous controller resets.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never replay a reset */
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

4062 4063 4064
/* Called when a command completes: decide whether its request is done
 * and, if so, report the appropriate completion callback through
 * req_complete/req_complete_skb. On failure mid-request, the remaining
 * queued commands of that request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			/* Start of the next request: put it back */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

4124
static void hci_rx_work(struct work_struct *work)
L
Linus Torvalds 已提交
4125
{
4126
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
L
Linus Torvalds 已提交
4127 4128 4129 4130 4131
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
4132 4133 4134
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

L
Linus Torvalds 已提交
4135 4136
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
4137
			hci_send_to_sock(hdev, skb);
L
Linus Torvalds 已提交
4138 4139
		}

4140
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
L
Linus Torvalds 已提交
4141 4142 4143 4144 4145 4146
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
4147
			switch (hci_skb_pkt_type(skb)) {
L
Linus Torvalds 已提交
4148 4149 4150 4151
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
4152
			}
L
Linus Torvalds 已提交
4153 4154 4155
		}

		/* Process frame */
4156
		switch (hci_skb_pkt_type(skb)) {
L
Linus Torvalds 已提交
4157
		case HCI_EVENT_PKT:
4158
			BT_DBG("%s Event packet", hdev->name);
L
Linus Torvalds 已提交
4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

4179
static void hci_cmd_work(struct work_struct *work)
L
Linus Torvalds 已提交
4180
{
4181
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
L
Linus Torvalds 已提交
4182 4183
	struct sk_buff *skb;

4184 4185
	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
L
Linus Torvalds 已提交
4186 4187

	/* Send queued commands */
4188 4189 4190 4191 4192
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

4193
		kfree_skb(hdev->sent_cmd);
L
Linus Torvalds 已提交
4194

4195
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
A
Andrei Emeltchenko 已提交
4196
		if (hdev->sent_cmd) {
L
Linus Torvalds 已提交
4197
			atomic_dec(&hdev->cmd_cnt);
4198
			hci_send_frame(hdev, skb);
4199
			if (test_bit(HCI_RESET, &hdev->flags))
4200
				cancel_delayed_work(&hdev->cmd_timer);
4201
			else
4202 4203
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
L
Linus Torvalds 已提交
4204 4205
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
4206
			queue_work(hdev->workqueue, &hdev->cmd_work);
L
Linus Torvalds 已提交
4207 4208 4209
		}
	}
}