/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

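/* A synchronous request starts out as HCI_REQ_PEND and is moved to
 * HCI_REQ_DONE by hci_req_sync_complete() or to HCI_REQ_CANCELED by
 * hci_req_cancel(); callers sleep on hdev->req_wait_q until the status
 * leaves HCI_REQ_PEND or the request times out.
 */
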
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

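/* Illustrative note: the dut_mode attribute toggles Device Under Test
 * mode from user space (debugfs is typically mounted at /sys/kernel/debug,
 * so the file lives at bluetooth/hciX/dut_mode):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling resets the controller.
 */
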
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

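/* Illustrative caller-side sketch, mirroring dut_mode_write() above: send
 * a command synchronously under the request lock and release the returned
 * event skb:
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */
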
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

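/* hci_req_sync() is the locked wrapper around __hci_req_sync(): it fails
 * fast with -ENETDOWN when the device is not up and otherwise serializes
 * the request against all other synchronous requests via hci_req_lock().
 */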
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

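/* Build the eight byte payload for HCI_OP_SET_EVENT_MASK below: events[n]
 * is byte n of the little-endian mask, so e.g. events[4] |= 0x02 enables
 * bit 33, the "Inquiry Result with RSSI" event.
 */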
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only send the command if
	 * it is marked as supported. If not supported assume that the
	 * controller does not have actual support for stored link keys
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

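/* Controller bring-up runs up to four synchronous request stages: init1
 * resets the controller and reads basic information, init2 does the
 * transport specific (BR/EDR, LE or AMP) setup, and init3/init4 program
 * event masks and optional features. AMP controllers only go through the
 * first two stages.
 */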
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

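/* Drive the discovery state machine. Only real transitions are reported
 * through mgmt_discovering(), and falling back to DISCOVERY_STOPPED also
 * re-evaluates background scanning.
 */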
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

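/* Add or refresh an inquiry cache entry and return MGMT_DEV_FOUND_* flags
 * telling the caller what is still needed, e.g. confirming the remote
 * name when it is not known yet.
 */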
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

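/* HCIINQUIRY ioctl helper: flush the inquiry cache when it is stale or a
 * flush was requested, optionally run a fresh inquiry, and copy up to
 * ir.num_rsp (or 255 for "unlimited") cached responses to user space.
 */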
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

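/* Power down the controller: flush pending work, stop all timers, tear
 * down connections and, for a BR/EDR device that is not being auto-powered
 * off, report the power change to mgmt before the driver's close() hook.
 */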
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
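
/* Illustrative note (not part of the upstream code): the scan parameter
 * mirrors the HCI Write Scan Enable values, so a non-mgmt HCISETSCAN
 * request toggles the flags roughly like this:
 *
 *	hci_update_scan_state(hdev, SCAN_DISABLED);		// neither flag set
 *	hci_update_scan_state(hdev, SCAN_PAGE);			// connectable only
 *	hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY);	// both set
 */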

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
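
/* Illustrative sketch (userspace side, not part of this file): the ioctl
 * switch above is reached through a raw HCI socket, e.g.:
 *
 *	struct hci_dev_req dr = { .dev_id = 0, .dev_opt = SCAN_PAGE };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */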

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
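
/* Illustrative examples (not part of the upstream code) of the decision
 * above, for an unauthenticated combination key
 * (HCI_LK_UNAUTH_COMBINATION_P192) with no previous key (old_key_type 0xff):
 *
 *	conn->auth_type = 0x02 (dedicated bonding)
 *		-> returns true, the key is stored persistently
 *	conn->auth_type = 0x00 and conn->remote_auth = 0x00 (no bonding)
 *		-> returns false, the key is kept only for this session
 */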

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}
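
/* Illustrative sketch (hypothetical values, not part of the upstream code):
 * registering advertising instance 1 with a minimal Flags AD field, a
 * 60-second total timeout and the default per-cycle duration:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// len, type (Flags), LE General Disc.
 *
 *	hci_dev_lock(hdev);
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL, 60, 0);
 *	hci_dev_unlock(hdev);
 */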

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
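
/* Illustrative sketch (example address, not part of the upstream code):
 * these helpers back hdev->whitelist, hdev->blacklist and the LE white
 * list, e.g. white-listing a public LE peer:
 *
 *	bdaddr_t peer = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 } };
 *
 *	hci_bdaddr_list_add(&hdev->le_white_list, &peer, ADDR_LE_DEV_PUBLIC);
 *	...
 *	hci_bdaddr_list_del(&hdev->le_white_list, &peer, ADDR_LE_DEV_PUBLIC);
 */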

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
						    bdaddr_t *addr,
						    u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, &hdev->pend_le_conns, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
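	/* Worked example (illustrative numbers only): with a scan_duration
	 * of 10240 jiffies, scan_start at 5000 and a restart at now = 9000,
	 * elapsed is 4000 and the disable work is re-queued after
	 * 10240 - 4000 = 6240 jiffies.
	 */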
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
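
/* Illustrative example (not part of the upstream code): on an LE-only
 * controller without a public BD_ADDR but with a configured static
 * address, the identity resolves to that static address:
 *
 *	bdaddr_t id;
 *	u8 id_type;
 *
 *	hci_copy_identity_address(hdev, &id, &id_type);
 *	// id == hdev->static_addr, id_type == ADDR_LE_DEV_RANDOM
 */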

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
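
/* Illustrative sketch (hypothetical driver, not part of the upstream code):
 * a transport driver that detects an unresponsive controller can inject
 * the synthetic Hardware Error event above instead of recovering itself:
 *
 *	static void example_transport_stalled(struct hci_dev *hdev)
 *	{
 *		bt_dev_err(hdev, "transport stalled, injecting HW error");
 *		hci_reset_dev(hdev);
 *	}
 */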

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
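
/* Illustrative sketch (hypothetical driver, not part of the upstream code):
 * a transport driver hands a completed packet to the core like this:
 *
 *	static int example_rx(struct hci_dev *hdev, const void *buf,
 *			      int len, u8 type)
 *	{
 *		struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		bt_cb(skb)->pkt_type = type;
 *		memcpy(skb_put(skb, len), buf, len);
 *
 *		return hci_recv_frame(hdev, skb);	// consumes the skb
 *	}
 */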

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
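
/* Illustrative sketch (hypothetical protocol, not part of the upstream
 * code): upper protocols hook connection events by registering a
 * struct hci_cb, typically from their module init:
 *
 *	static struct hci_cb example_cb = {
 *		.name		= "example",
 *		.connect_cfm	= example_connect_cfm,
 *	};
 *
 *	hci_register_cb(&example_cb);
 */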

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
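
/* Illustrative sketch (hypothetical vendor opcode, not part of the upstream
 * code): issuing a synchronous command and consuming the Command Complete
 * parameters:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	// skb->data holds the command's return parameters
 *	kfree_skb(skb);
 */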

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

3725
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3726
			  struct sk_buff *skb, __u16 flags)
L
Linus Torvalds 已提交
3727
{
3728
	struct hci_conn *conn = chan->conn;
L
Linus Torvalds 已提交
3729 3730 3731
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

3732 3733 3734 3735
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}
3748

A
Andrei Emeltchenko 已提交
3749 3750
	list = skb_shinfo(skb)->frag_list;
	if (!list) {
L
Linus Torvalds 已提交
3751 3752 3753
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

3754
		skb_queue_tail(queue, skb);
L
Linus Torvalds 已提交
3755 3756 3757 3758 3759 3760
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
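
/* Usage sketch (illustrative): an upper layer such as L2CAP hands a
 * fully built PDU to the scheduler; ACL_START marks the first fragment
 * of a higher-layer message. hchan stands for a struct hci_chan the
 * caller already owns and is a hypothetical name.
 *
 *	hci_send_acl(hchan, skb, ACL_START);
 */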

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
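
/* Illustrative note: with, say, 8 free ACL slots (cnt == 8) shared by
 * three busy connections (num == 3), the least recently serviced
 * connection gets a quote of 8 / 3 == 2 packets this round; a zero
 * quotient is bumped to 1 so no connection is starved outright.
 */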

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
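
/* Illustrative note: channel selection is two-level. Among channels of
 * the requested link type, only those whose head skb carries the
 * highest pending priority compete, and of those the one on the least
 * recently serviced connection (lowest conn->sent) wins the quote.
 */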

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
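
/* Illustrative note: with hdev->block_len == 64 and a 339-byte ACL
 * packet (4-byte header plus 335 bytes of payload), the packet
 * occupies DIV_ROUND_UP(335, 64) == 6 controller data blocks.
 */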

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
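
/* Illustrative note: BR/EDR controllers normally report
 * HCI_FLOW_CTL_MODE_PACKET_BASED, where credits count whole packets;
 * HCI_FLOW_CTL_MODE_BLOCK_BASED accounts in fixed-size data blocks
 * instead and is the mode used with AMP controllers.
 */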

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
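
/* Illustrative note: bt_cb(skb)->req.start marks the first command of
 * a queued request, so an empty command queue, or one whose head
 * starts a new request, means every command of the current request has
 * already left the queue.
 */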

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
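
/* Illustrative note: cmd_cnt is the controller's command credit count.
 * It is decremented here when a command is handed to the driver and
 * replenished from the ncmd field of Command Complete/Command Status
 * events (handled in hci_event.c), which re-schedules this work item.
 */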