/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"


/* Version/revision of the mgmt interface exposed via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	9

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
49
	MGMT_OP_SET_BONDABLE,
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
80
	MGMT_OP_SET_DEVICE_ID,
81
	MGMT_OP_SET_ADVERTISING,
82
	MGMT_OP_SET_BREDR,
83
	MGMT_OP_SET_STATIC_ADDRESS,
84
	MGMT_OP_SET_SCAN_PARAMS,
85
	MGMT_OP_SET_SECURE_CONN,
86
	MGMT_OP_SET_DEBUG_KEYS,
87
	MGMT_OP_SET_PRIVACY,
88
	MGMT_OP_LOAD_IRKS,
89
	MGMT_OP_GET_CONN_INFO,
90
	MGMT_OP_GET_CLOCK_INFO,
91 92
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
93
	MGMT_OP_LOAD_CONN_PARAM,
94
	MGMT_OP_READ_UNCONF_INDEX_LIST,
95
	MGMT_OP_READ_CONFIG_INFO,
96
	MGMT_OP_SET_EXTERNAL_CONFIG,
97
	MGMT_OP_SET_PUBLIC_ADDRESS,
98
	MGMT_OP_START_SERVICE_DISCOVERY,
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
122
	MGMT_EV_PASSKEY_NOTIFY,
123
	MGMT_EV_NEW_IRK,
124
	MGMT_EV_NEW_CSRK,
125 126
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
127
	MGMT_EV_NEW_CONN_PARAM,
128
	MGMT_EV_UNCONF_INDEX_ADDED,
129
	MGMT_EV_UNCONF_INDEX_REMOVED,
130
	MGMT_EV_NEW_CONFIG_OPTIONS,
131 132
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
133 134
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 16-byte key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

struct mgmt_pending_cmd {
141
	struct list_head list;
142
	u16 opcode;
143
	int index;
144
	void *param;
145
	size_t param_len;
146
	struct sock *sk;
147
	void *user_data;
148
	int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
149 150
};

151 152 153 154 155 156 157 158
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
159
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Map an HCI status code to its MGMT equivalent; codes beyond the
 * conversion table fall back to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	return hci_status < ARRAY_SIZE(mgmt_status_table) ?
		mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}

224 225 226
static int mgmt_send_event(u16 event, struct hci_dev *hdev,
			   unsigned short channel, void *data, u16 data_len,
			   struct sock *skip_sk)
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

249
	hci_send_to_channel(channel, skb, skip_sk);
250 251 252 253 254
	kfree_skb(skb);

	return 0;
}

255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
static int mgmt_index_event(u16 event, struct hci_dev *hdev,
			    void *data, u16 data_len, int flag)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_flagged_channel(HCI_CHANNEL_CONTROL, skb, flag);
	kfree_skb(skb);

	return 0;
}

282 283 284 285 286 287 288
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       skip_sk);
}

289
static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
290 291 292 293
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
294
	int err;
295

296
	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
297

298
	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
299 300 301 302 303
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

304
	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
305
	hdr->index = cpu_to_le16(index);
306 307 308 309
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
310
	ev->opcode = cpu_to_le16(cmd);
311

312 313
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
314 315
		kfree_skb(skb);

316
	return err;
317 318
}

319 320
static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			     void *rp, size_t rp_len)
321 322 323 324
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
325
	int err;
326 327 328

	BT_DBG("sock %p", sk);

329
	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
330 331 332 333 334
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

335
	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
336
	hdr->index = cpu_to_le16(index);
337
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
338

339
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
340
	ev->opcode = cpu_to_le16(cmd);
341
	ev->status = status;
342 343 344

	if (rp)
		memcpy(ev->data, rp, rp_len);
345

346 347
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
348 349
		kfree_skb(skb);

350
	return err;
351 352
}

353 354
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
355 356 357 358 359 360
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
361
	rp.revision = cpu_to_le16(MGMT_REVISION);
362

363 364
	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
365 366
}

367 368
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
369 370
{
	struct mgmt_rp_read_commands *rp;
371 372
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
373
	__le16 *opcode;
374 375 376 377 378 379 380 381 382 383 384
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

385 386
	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);
387 388 389 390 391 392 393

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

394 395
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
396 397 398 399 400
	kfree(rp);

	return err;
}

401 402
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
403 404
{
	struct mgmt_rp_read_index_list *rp;
405
	struct hci_dev *d;
406
	size_t rp_len;
407
	u16 count;
408
	int err;
409 410 411 412 413 414

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
415
	list_for_each_entry(d, &hci_dev_list, list) {
416
		if (d->dev_type == HCI_BREDR &&
417
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
418
			count++;
419 420
	}

421 422 423
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
424
		read_unlock(&hci_dev_list_lock);
425
		return -ENOMEM;
426
	}
427

428
	count = 0;
429
	list_for_each_entry(d, &hci_dev_list, list) {
430 431 432
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
433 434
			continue;

435 436 437 438
		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
439 440
			continue;

441
		if (d->dev_type == HCI_BREDR &&
442
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
443 444 445
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
446 447
	}

448 449 450
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

451 452
	read_unlock(&hci_dev_list_lock);

453 454
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);
455

456 457 458
	kfree(rp);

	return err;
459 460
}

461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
477
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
478 479 480 481 482 483 484 485 486 487 488 489
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
490 491 492
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
493 494 495 496 497 498 499 500 501
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
502
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
503 504 505 506 507 508 509 510 511 512
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

513 514
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
515 516 517 518 519 520

	kfree(rp);

	return err;
}

521 522 523
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
524
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
525 526 527 528 529 530 531 532 533
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

534 535 536 537
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

538
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
539
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
540 541
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

542 543 544 545 546 547 548
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

549 550 551 552 553 554 555 556
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}

557 558 559 560
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

561 562
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
563 564
}

565 566 567 568
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
569
	u32 options = 0;
570 571 572 573 574 575 576

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
577

578 579 580
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

581
	if (hdev->set_bdaddr)
582 583 584 585
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);
586 587 588

	hci_dev_unlock(hdev);

589 590
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
591 592
}

593 594 595 596 597
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
598
	settings |= MGMT_SETTING_BONDABLE;
599
	settings |= MGMT_SETTING_DEBUG_KEYS;
600 601
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;
602

603
	if (lmp_bredr_capable(hdev)) {
604 605
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
606 607
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
608 609 610 611 612

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
613

614
		if (lmp_sc_capable(hdev))
615
			settings |= MGMT_SETTING_SECURE_CONN;
616
	}
617

618
	if (lmp_le_capable(hdev)) {
619
		settings |= MGMT_SETTING_LE;
620
		settings |= MGMT_SETTING_ADVERTISING;
621
		settings |= MGMT_SETTING_SECURE_CONN;
622
		settings |= MGMT_SETTING_PRIVACY;
623
		settings |= MGMT_SETTING_STATIC_ADDRESS;
624
	}
625

626 627
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
628 629
		settings |= MGMT_SETTING_CONFIGURATION;

630 631 632 633 634 635 636
	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

637
	if (hdev_is_powered(hdev))
638 639
		settings |= MGMT_SETTING_POWERED;

640
	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
641 642
		settings |= MGMT_SETTING_CONNECTABLE;

643
	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
644 645
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

646
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
647 648
		settings |= MGMT_SETTING_DISCOVERABLE;

649
	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
650
		settings |= MGMT_SETTING_BONDABLE;
651

652
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
653 654
		settings |= MGMT_SETTING_BREDR;

655
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
656 657
		settings |= MGMT_SETTING_LE;

658
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
659 660
		settings |= MGMT_SETTING_LINK_SECURITY;

661
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
662 663
		settings |= MGMT_SETTING_SSP;

664
	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
665 666
		settings |= MGMT_SETTING_HS;

667
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
668 669
		settings |= MGMT_SETTING_ADVERTISING;

670
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
671 672
		settings |= MGMT_SETTING_SECURE_CONN;

673
	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
674 675
		settings |= MGMT_SETTING_DEBUG_KEYS;

676
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
677 678
		settings |= MGMT_SETTING_PRIVACY;

679 680 681 682 683 684 685 686 687 688 689 690
	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never bet set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
691
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
692
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
693 694 695 696 697
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

698 699 700
	return settings;
}

701 702
#define PNP_INFO_SVCLASS_ID		0x1200

703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

811 812
static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
						  struct hci_dev *hdev)
813
{
814
	struct mgmt_pending_cmd *cmd;
815 816 817 818 819 820 821 822 823

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

824 825 826
static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
						       struct hci_dev *hdev,
						       const void *data)
827
{
828
	struct mgmt_pending_cmd *cmd;
829 830 831 832 833 834 835 836 837 838 839

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->user_data != data)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

840 841
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
864 865 866 867 868 869 870 871
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

872
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
873 874 875 876 877 878
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

879 880
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
881 882
		return;

883 884
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;
885 886 887 888 889 890

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

891 892
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
893
	struct mgmt_pending_cmd *cmd;
894 895 896 897 898 899 900 901 902 903 904 905

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
906
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
907
			return LE_AD_LIMITED;
908
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
909 910 911 912 913 914
			return LE_AD_GENERAL;
	}

	return 0;
}

915
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
916 917 918
{
	u8 ad_len = 0, flags = 0;

919
	flags |= get_adv_discov_flags(hdev);
920

921
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

947
static void update_adv_data(struct hci_request *req)
948 949 950 951 952
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

953
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
954 955 956 957
		return;

	memset(&cp, 0, sizeof(cp));

958
	len = create_adv_data(hdev, cp.data);
959 960 961 962 963 964 965 966 967 968 969 970 971

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

972 973 974 975 976 977 978 979 980 981
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}

982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

1005
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1006 1007 1008 1009 1010 1011 1012
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

1025
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1026
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1027
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1028 1029
}

1030
static void update_eir(struct hci_request *req)
1031
{
1032
	struct hci_dev *hdev = req->hdev;
1033 1034
	struct hci_cp_write_eir cp;

1035
	if (!hdev_is_powered(hdev))
1036
		return;
1037

1038
	if (!lmp_ext_inq_capable(hdev))
1039
		return;
1040

1041
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1042
		return;
1043

1044
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1045
		return;
1046 1047 1048 1049 1050 1051

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1052
		return;
1053 1054 1055

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

1056
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

1070
static void update_class(struct hci_request *req)
1071
{
1072
	struct hci_dev *hdev = req->hdev;
1073 1074 1075 1076
	u8 cod[3];

	BT_DBG("%s", hdev->name);

1077
	if (!hdev_is_powered(hdev))
1078
		return;
1079

1080
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1081 1082
		return;

1083
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1084
		return;
1085 1086 1087 1088 1089

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

1090
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1091 1092
		cod[1] |= 0x20;

1093
	if (memcmp(cod, hdev->dev_class, 3) == 0)
1094
		return;
1095

1096
	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1097 1098
}

1099
static bool get_connectable(struct hci_dev *hdev)
1100
{
1101
	struct mgmt_pending_cmd *cmd;
1102 1103 1104 1105 1106 1107 1108

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
1109
		return cp->val;
1110 1111
	}

1112
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1113 1114
}

1115 1116 1117 1118 1119 1120 1121
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1122 1123 1124 1125
/* Queue the HCI commands needed to (re)enable LE advertising on @req:
 * first the advertising parameters, then the enable command. Bails out
 * silently when an LE connection exists or when a suitable own address
 * cannot be programmed.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Advertising is not touched while an LE connection is active */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restarting with new parameters requires disabling first */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* HCI_ADVERTISING_CONNECTABLE forces connectable advertising;
	 * otherwise follow the (possibly still pending) connectable
	 * setting.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		connectable = true;
	else
		connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1166 1167 1168
/* Delayed-work handler that expires the service cache: once
 * HCI_SERVICE_CACHE is cleared, refresh the EIR data and Class of
 * Device to reflect the real UUID list. The EIR/class updates read
 * hdev state and therefore run under the device lock; the request is
 * only sent after the lock is dropped.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

1187 1188 1189 1190 1191 1192 1193 1194
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

1195
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1196

1197
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

1208
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1209
{
1210
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1211 1212
		return;

1213
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1214
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1215

1216 1217 1218 1219 1220
	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
1221
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1222 1223
}

1224
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1225
				void *data, u16 data_len)
1226
{
1227
	struct mgmt_rp_read_info rp;
1228

1229
	BT_DBG("sock %p %s", sk, hdev->name);
1230

1231
	hci_dev_lock(hdev);
1232

1233 1234
	memset(&rp, 0, sizeof(rp));

1235
	bacpy(&rp.bdaddr, &hdev->bdaddr);
1236

1237
	rp.version = hdev->hci_ver;
1238
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1239 1240 1241

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1242

1243
	memcpy(rp.dev_class, hdev->dev_class, 3);
1244

1245
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1246
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1247

1248
	hci_dev_unlock(hdev);
1249

1250 1251
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
1252 1253
}

1254
static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
1255 1256
{
	sock_put(cmd->sk);
1257
	kfree(cmd->param);
1258 1259 1260
	kfree(cmd);
}

1261 1262 1263
static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
						 struct hci_dev *hdev,
						 void *data, u16 len)
1264
{
1265
	struct mgmt_pending_cmd *cmd;
1266

1267
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1268
	if (!cmd)
1269
		return NULL;
1270 1271

	cmd->opcode = opcode;
1272
	cmd->index = hdev->id;
1273

1274
	cmd->param = kmemdup(data, len, GFP_KERNEL);
1275
	if (!cmd->param) {
1276
		kfree(cmd);
1277
		return NULL;
1278 1279
	}

1280
	cmd->param_len = len;
1281 1282 1283 1284

	cmd->sk = sk;
	sock_hold(sk);

1285
	list_add(&cmd->list, &hdev->mgmt_pending);
1286

1287
	return cmd;
1288 1289
}

1290
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1291
				 void (*cb)(struct mgmt_pending_cmd *cmd,
1292
					    void *data),
1293
				 void *data)
1294
{
1295
	struct mgmt_pending_cmd *cmd, *tmp;
1296

1297
	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1298
		if (opcode > 0 && cmd->opcode != opcode)
1299 1300 1301 1302 1303 1304
			continue;

		cb(cmd, data);
	}
}

1305
static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
1306 1307 1308 1309 1310
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

1311
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1312
{
1313
	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1314

1315 1316
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
1317 1318
}

1319
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1320 1321 1322
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

1323 1324
	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
1325
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1326
	}
1327 1328
}

1329
static bool hci_stop_discovery(struct hci_request *req)
1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

1344
		return true;
1345 1346 1347 1348 1349

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
1350
			break;
1351 1352 1353 1354 1355

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

1356
		return true;
1357 1358 1359

	default:
		/* Passive scanning */
1360
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1361
			hci_req_add_le_scan_disable(req);
1362 1363 1364
			return true;
		}

1365 1366
		break;
	}
1367 1368

	return false;
1369 1370
}

1371 1372 1373 1374
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
1375 1376
	bool discov_stopped;
	int err;
1377 1378 1379 1380 1381 1382 1383 1384 1385

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

1386
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1387 1388
		disable_advertising(&req);

1389
	discov_stopped = hci_stop_discovery(&req);
1390 1391 1392

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
1421 1422
	}

1423 1424 1425 1426 1427
	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
1428 1429
}

1430
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1431
		       u16 len)
1432
{
1433
	struct mgmt_mode *cp = data;
1434
	struct mgmt_pending_cmd *cmd;
1435
	int err;
1436

1437
	BT_DBG("request for %s", hdev->name);
1438

1439
	if (cp->val != 0x00 && cp->val != 0x01)
1440 1441
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);
1442

1443
	hci_dev_lock(hdev);
1444

1445
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1446 1447
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
1448 1449 1450
		goto failed;
	}

1451
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1452 1453 1454
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
1455 1456 1457
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
1458 1459 1460 1461
			goto failed;
		}
	}

1462
	if (!!cp->val == hdev_is_powered(hdev)) {
1463
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1464 1465 1466
		goto failed;
	}

1467
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1468 1469
	if (!cmd) {
		err = -ENOMEM;
1470
		goto failed;
1471
	}
1472

1473
	if (cp->val) {
1474
		queue_work(hdev->req_workqueue, &hdev->power_on);
1475 1476 1477 1478
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
1479 1480 1481
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);
1482

1483 1484
		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
1485
			cancel_delayed_work(&hdev->power_off);
1486 1487 1488 1489
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}
1490 1491

failed:
1492
	hci_dev_unlock(hdev);
1493
	return err;
1494 1495
}

1496 1497 1498 1499 1500 1501 1502 1503 1504
/* Broadcast a New Settings event with the current settings bitfield
 * to every mgmt socket except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &settings,
			  sizeof(settings), skip);
}

1505 1506 1507 1508 1509
/* Public wrapper: emit a New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1510 1511 1512 1513 1514 1515
/* Shared context for mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first responder socket (skipped on event) */
	struct hci_dev *hdev;	/* controller being operated on */
	u8 mgmt_status;		/* status to report, where applicable */
};

1516
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

1532
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1533 1534 1535
{
	u8 *status = data;

1536
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1537 1538 1539
	mgmt_pending_remove(cmd);
}

1540
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1554
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1555
{
1556 1557
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
1558 1559
}

1560
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1561
{
1562 1563
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
1564 1565
}

1566 1567 1568 1569
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1570
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1571 1572 1573 1574 1575 1576 1577 1578 1579
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1580
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1581 1582 1583 1584 1585
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1586 1587
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
1588
{
1589
	struct mgmt_pending_cmd *cmd;
1590
	struct mgmt_mode *cp;
1591
	struct hci_request req;
1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
1604
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1605
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1606 1607 1608 1609
		goto remove_cmd;
	}

	cp = cmd->param;
1610
	if (cp->val) {
1611
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1612 1613 1614 1615 1616 1617 1618

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
1619
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1620
	}
1621 1622 1623 1624 1625 1626

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

1627 1628
	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
1629 1630
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
1631 1632
	 */
	hci_req_init(&req, hdev);
1633
	__hci_update_page_scan(&req);
1634 1635 1636
	update_class(&req);
	hci_req_run(&req, NULL);

1637 1638 1639 1640 1641 1642 1643
remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1644
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1645
			    u16 len)
1646
{
1647
	struct mgmt_cp_set_discoverable *cp = data;
1648
	struct mgmt_pending_cmd *cmd;
1649
	struct hci_request req;
1650
	u16 timeout;
1651
	u8 scan;
1652 1653
	int err;

1654
	BT_DBG("request for %s", hdev->name);
1655

1656 1657
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1658 1659
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);
1660

1661
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1662 1663
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1664

1665
	timeout = __le16_to_cpu(cp->timeout);
1666 1667 1668 1669 1670 1671

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
1672 1673
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1674

1675
	hci_dev_lock(hdev);
1676

1677
	if (!hdev_is_powered(hdev) && timeout > 0) {
1678 1679
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
1680 1681 1682
		goto failed;
	}

1683
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1684
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1685 1686
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
1687 1688 1689
		goto failed;
	}

1690
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1691 1692
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
1693 1694 1695 1696
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
1697 1698
		bool changed = false;

1699 1700 1701 1702
		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
1703
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1704
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1705 1706 1707
			changed = true;
		}

1708
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1709 1710 1711 1712 1713 1714
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

1715 1716 1717
		goto failed;
	}

1718 1719 1720 1721
	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
1722 1723 1724
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
1725 1726
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;
1727

1728 1729
		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1730
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1731
					   to);
1732 1733
		}

1734
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1735 1736 1737
		goto failed;
	}

1738
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1739 1740
	if (!cmd) {
		err = -ENOMEM;
1741
		goto failed;
1742
	}
1743

1744 1745 1746 1747 1748 1749 1750
	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

1751 1752
	/* Limited discoverable mode */
	if (cp->val == 0x02)
1753
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1754
	else
1755
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1756

1757 1758
	hci_req_init(&req, hdev);

1759 1760 1761
	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
1762
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1763 1764
		goto update_ad;

1765 1766
	scan = SCAN_PAGE;

1767 1768 1769 1770 1771
	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
1772
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

1790
		scan |= SCAN_INQUIRY;
1791
	} else {
1792
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1793
	}
1794

1795
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1796

1797 1798 1799
update_ad:
	update_adv_data(&req);

1800
	err = hci_req_run(&req, set_discoverable_complete);
1801
	if (err < 0)
1802
		mgmt_pending_remove(cmd);
1803 1804

failed:
1805
	hci_dev_unlock(hdev);
1806 1807 1808
	return err;
}

1809 1810
static void write_fast_connectable(struct hci_request *req, bool enable)
{
1811
	struct hci_dev *hdev = req->hdev;
1812 1813 1814
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

1815
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1816 1817
		return;

1818 1819 1820
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

1821 1822 1823 1824
	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
1825
		acp.interval = cpu_to_le16(0x0100);
1826 1827 1828 1829
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
1830
		acp.interval = cpu_to_le16(0x0800);
1831 1832
	}

1833
	acp.window = cpu_to_le16(0x0012);
1834

1835 1836 1837 1838 1839 1840 1841
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1842 1843
}

1844 1845
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
1846
{
1847
	struct mgmt_pending_cmd *cmd;
1848
	struct mgmt_mode *cp;
1849
	bool conn_changed, discov_changed;
1850 1851 1852 1853 1854 1855 1856 1857 1858

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

1859 1860
	if (status) {
		u8 mgmt_err = mgmt_status(status);
1861
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1862 1863 1864
		goto remove_cmd;
	}

1865
	cp = cmd->param;
1866
	if (cp->val) {
1867 1868
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
1869 1870
		discov_changed = false;
	} else {
1871 1872 1873 1874
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
1875
	}
1876

1877 1878
	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

1879
	if (conn_changed || discov_changed) {
1880
		new_settings(hdev, cmd->sk);
1881
		hci_update_page_scan(hdev);
1882 1883
		if (discov_changed)
			mgmt_update_adv_data(hdev);
1884 1885
		hci_update_background_scan(hdev);
	}
1886

1887
remove_cmd:
1888 1889 1890 1891 1892 1893
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1894 1895 1896 1897 1898 1899
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

1900
	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1901 1902 1903
		changed = true;

	if (val) {
1904
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1905
	} else {
1906 1907
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1908 1909 1910 1911 1912 1913
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

1914
	if (changed) {
1915
		hci_update_page_scan(hdev);
1916
		hci_update_background_scan(hdev);
1917
		return new_settings(hdev, sk);
1918
	}
1919 1920 1921 1922

	return 0;
}

1923
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1924
			   u16 len)
1925
{
1926
	struct mgmt_mode *cp = data;
1927
	struct mgmt_pending_cmd *cmd;
1928
	struct hci_request req;
1929
	u8 scan;
1930 1931
	int err;

1932
	BT_DBG("request for %s", hdev->name);
1933

1934 1935
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1936 1937
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);
1938

1939
	if (cp->val != 0x00 && cp->val != 0x01)
1940 1941
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1942

1943
	hci_dev_lock(hdev);
1944

1945
	if (!hdev_is_powered(hdev)) {
1946
		err = set_connectable_update_settings(hdev, sk, cp->val);
1947 1948 1949
		goto failed;
	}

1950
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1951
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1952 1953
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
1954 1955 1956
		goto failed;
	}

1957
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1958 1959
	if (!cmd) {
		err = -ENOMEM;
1960
		goto failed;
1961
	}
1962

1963
	hci_req_init(&req, hdev);
1964

1965 1966 1967 1968
	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
1969
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1970
		if (!cp->val) {
1971 1972
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1973 1974 1975
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1976 1977 1978
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;
1991 1992

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1993
			    hdev->discov_timeout > 0)
1994 1995
				cancel_delayed_work(&hdev->discov_off);
		}
1996

1997 1998
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
1999

2000
no_scan_update:
2001
	/* Update the advertising parameters if necessary */
2002
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2003 2004
		enable_advertising(&req);

2005
	err = hci_req_run(&req, set_connectable_complete);
2006
	if (err < 0) {
2007
		mgmt_pending_remove(cmd);
2008
		if (err == -ENODATA)
2009 2010
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
2011 2012
		goto failed;
	}
2013 2014

failed:
2015
	hci_dev_unlock(hdev);
2016 2017 2018
	return err;
}

2019
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2020
			u16 len)
2021
{
2022
	struct mgmt_mode *cp = data;
2023
	bool changed;
2024 2025
	int err;

2026
	BT_DBG("request for %s", hdev->name);
2027

2028
	if (cp->val != 0x00 && cp->val != 0x01)
2029 2030
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);
2031

2032
	hci_dev_lock(hdev);
2033 2034

	if (cp->val)
2035
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2036
	else
2037
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2038

2039
	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2040
	if (err < 0)
2041
		goto unlock;
2042

2043 2044
	if (changed)
		err = new_settings(hdev, sk);
2045

2046
unlock:
2047
	hci_dev_unlock(hdev);
2048 2049 2050
	return err;
}

2051 2052
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
2053 2054
{
	struct mgmt_mode *cp = data;
2055
	struct mgmt_pending_cmd *cmd;
2056
	u8 val, status;
2057 2058
	int err;

2059
	BT_DBG("request for %s", hdev->name);
2060

2061 2062
	status = mgmt_bredr_support(hdev);
	if (status)
2063 2064
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);
2065

2066
	if (cp->val != 0x00 && cp->val != 0x01)
2067 2068
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);
2069

2070 2071
	hci_dev_lock(hdev);

2072
	if (!hdev_is_powered(hdev)) {
2073 2074
		bool changed = false;

2075
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2076
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2077 2078 2079 2080 2081 2082 2083 2084 2085 2086
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

2087 2088 2089 2090
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2091 2092
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2120
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2121 2122
{
	struct mgmt_mode *cp = data;
2123
	struct mgmt_pending_cmd *cmd;
2124
	u8 status;
2125 2126
	int err;

2127
	BT_DBG("request for %s", hdev->name);
2128

2129 2130
	status = mgmt_bredr_support(hdev);
	if (status)
2131
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2132

2133
	if (!lmp_ssp_capable(hdev))
2134 2135
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);
2136

2137
	if (cp->val != 0x00 && cp->val != 0x01)
2138 2139
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);
2140

2141
	hci_dev_lock(hdev);
2142

2143
	if (!hdev_is_powered(hdev)) {
2144
		bool changed;
2145

2146
		if (cp->val) {
2147 2148
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
2149
		} else {
2150 2151
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
2152
			if (!changed)
2153 2154
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
2155
			else
2156
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2157 2158 2159 2160 2161 2162 2163 2164 2165
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

2166 2167 2168
		goto failed;
	}

2169
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2170 2171
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
2172 2173 2174
		goto failed;
	}

2175
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2176 2177 2178 2179 2180 2181 2182 2183 2184 2185
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

2186
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2187 2188 2189
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

2190
	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2191 2192 2193 2194 2195 2196 2197 2198 2199 2200
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2201
/* Handle the Set High Speed mgmt command. HS requires BR/EDR, SSP
 * support and SSP currently enabled; disabling HS while powered on
 * is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could toggle the SSP state underneath us */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* HCI request completion handler for Set LE: report the result to all
 * pending Set LE commands and, on success, refresh advertising data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set Low Energy mgmt command. When the controller is not
 * powered (or the host LE state already matches) only flags are
 * updated; otherwise a Write LE Host Supported HCI request is issued.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Turning LE off implicitly disables advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any controller-side advertising before disabling */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
2395
	struct mgmt_pending_cmd *cmd;
2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428
/* Bluetooth Base UUID in little-endian byte order; the 12 low bytes
 * are shared by all 16- and 32-bit assigned UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the smallest representation (16, 32 or 128 bits) that can
 * encode the given 128-bit UUID.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	/* Anything not built on the base UUID needs full 128 bits */
	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

/* Common completion path for class/EIR related mgmt commands: answer
 * the pending command (if any) with the current device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* HCI request completion callback for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

/* Handle the Add UUID mgmt command: record the UUID and refresh the
 * device class and EIR data on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands needed, complete immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2518
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2519 2520
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2521 2522 2523 2524 2525 2526
		return true;
	}

	return false;
}

2527
/* HCI request completion callback for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2535
		       u16 len)
2536
{
2537
	struct mgmt_cp_remove_uuid *cp = data;
2538
	struct mgmt_pending_cmd *cmd;
2539
	struct bt_uuid *match, *tmp;
2540
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2541
	struct hci_request req;
2542 2543
	int err, found;

2544
	BT_DBG("request for %s", hdev->name);
2545

2546
	hci_dev_lock(hdev);
2547

2548
	if (pending_eir_or_class(hdev)) {
2549 2550
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
2551 2552 2553
		goto unlock;
	}

2554
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2555
		hci_uuids_clear(hdev);
2556

2557
		if (enable_service_cache(hdev)) {
2558 2559 2560
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
2561 2562
			goto unlock;
		}
2563

2564
		goto update_class;
2565 2566 2567 2568
	}

	found = 0;

2569
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2570 2571 2572 2573
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
2574
		kfree(match);
2575 2576 2577 2578
		found++;
	}

	if (found == 0) {
2579 2580
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
2581 2582 2583
		goto unlock;
	}

2584
update_class:
2585
	hci_req_init(&req, hdev);
2586

2587 2588 2589
	update_class(&req);
	update_eir(&req);

2590 2591 2592 2593
	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2594

2595 2596
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
2597 2598 2599 2600
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2601
	if (!cmd) {
2602
		err = -ENOMEM;
2603 2604 2605 2606
		goto unlock;
	}

	err = 0;
2607 2608

unlock:
2609
	hci_dev_unlock(hdev);
2610 2611 2612
	return err;
}

2613
/* HCI request completion callback for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2621
			 u16 len)
2622
{
2623
	struct mgmt_cp_set_dev_class *cp = data;
2624
	struct mgmt_pending_cmd *cmd;
2625
	struct hci_request req;
2626 2627
	int err;

2628
	BT_DBG("request for %s", hdev->name);
2629

2630
	if (!lmp_bredr_capable(hdev))
2631 2632
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);
2633

2634
	hci_dev_lock(hdev);
2635

2636
	if (pending_eir_or_class(hdev)) {
2637 2638
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
2639 2640
		goto unlock;
	}
2641

2642
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2643 2644
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
2645 2646
		goto unlock;
	}
2647

2648 2649 2650
	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

2651
	if (!hdev_is_powered(hdev)) {
2652 2653
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
2654 2655 2656
		goto unlock;
	}

2657 2658
	hci_req_init(&req, hdev);

2659
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2660 2661 2662
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
2663
		update_eir(&req);
2664
	}
2665

2666 2667
	update_class(&req);

2668 2669 2670 2671
	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2672

2673 2674
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
2675 2676 2677 2678
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2679
	if (!cmd) {
2680
		err = -ENOMEM;
2681 2682 2683 2684
		goto unlock;
	}

	err = 0;
2685

2686
unlock:
2687
	hci_dev_unlock(hdev);
2688 2689 2690
	return err;
}

2691
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2692
			  u16 len)
2693
{
2694
	struct mgmt_cp_load_link_keys *cp = data;
2695 2696
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
2697
	u16 key_count, expected_len;
2698
	bool changed;
2699
	int i;
2700

2701 2702 2703
	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
2704 2705
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
2706

2707
	key_count = __le16_to_cpu(cp->key_count);
2708 2709 2710
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
2711 2712
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2713
	}
2714

2715 2716
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
2717
	if (expected_len != len) {
2718
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2719
		       expected_len, len);
2720 2721
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2722 2723
	}

2724
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2725 2726
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2727

2728
	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2729
	       key_count);
2730

2731 2732 2733
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

2734
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2735 2736 2737
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
2738 2739
	}

2740
	hci_dev_lock(hdev);
2741 2742 2743 2744

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
2745
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2746
	else
2747 2748
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
2749 2750 2751

	if (changed)
		new_settings(hdev, NULL);
2752

2753
	for (i = 0; i < key_count; i++) {
2754
		struct mgmt_link_key_info *key = &cp->keys[i];
2755

2756 2757 2758 2759 2760 2761
		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

2762 2763
		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
2764 2765
	}

2766
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2767

2768
	hci_dev_unlock(hdev);
2769

2770
	return 0;
2771 2772
}

2773
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2774
			   u8 addr_type, struct sock *skip_sk)
2775 2776 2777 2778 2779 2780 2781
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2782
			  skip_sk);
2783 2784
}

2785
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2786
			 u16 len)
2787
{
2788 2789
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
2790
	struct hci_cp_disconnect dc;
2791
	struct mgmt_pending_cmd *cmd;
2792 2793 2794
	struct hci_conn *conn;
	int err;

2795
	memset(&rp, 0, sizeof(rp));
2796 2797
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
2798

2799
	if (!bdaddr_type_is_valid(cp->addr.type))
2800 2801 2802
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2803

2804
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2805 2806 2807
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2808

2809 2810
	hci_dev_lock(hdev);

2811
	if (!hdev_is_powered(hdev)) {
2812 2813 2814
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2815 2816 2817
		goto unlock;
	}

2818
	if (cp->addr.type == BDADDR_BREDR) {
2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

2832
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2833 2834 2835
	} else {
		u8 addr_type;

2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

2853 2854 2855 2856 2857
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

2858 2859
		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

2860 2861
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}
2862

2863
	if (err < 0) {
2864 2865 2866
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
2867 2868 2869
		goto unlock;
	}

2870 2871 2872
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
2873
	if (!conn) {
2874 2875
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
2876
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2877 2878
		goto unlock;
	}
2879

2880
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2881
			       sizeof(*cp));
2882 2883 2884
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
2885 2886
	}

2887 2888
	cmd->cmd_complete = addr_cmd_complete;

2889
	dc.handle = cpu_to_le16(conn->handle);
2890 2891 2892 2893 2894
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

2895
unlock:
2896
	hci_dev_unlock(hdev);
2897 2898 2899
	return err;
}

2900
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2901
		      u16 len)
2902
{
2903
	struct mgmt_cp_disconnect *cp = data;
2904
	struct mgmt_rp_disconnect rp;
2905
	struct mgmt_pending_cmd *cmd;
2906 2907 2908 2909 2910
	struct hci_conn *conn;
	int err;

	BT_DBG("");

2911 2912 2913 2914
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2915
	if (!bdaddr_type_is_valid(cp->addr.type))
2916 2917 2918
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2919

2920
	hci_dev_lock(hdev);
2921 2922

	if (!test_bit(HCI_UP, &hdev->flags)) {
2923 2924 2925
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2926 2927 2928
		goto failed;
	}

2929
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2930 2931
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2932 2933 2934
		goto failed;
	}

2935
	if (cp->addr.type == BDADDR_BREDR)
2936 2937
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
2938 2939
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2940

2941
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2942 2943 2944
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
2945 2946 2947
		goto failed;
	}

2948
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2949 2950
	if (!cmd) {
		err = -ENOMEM;
2951
		goto failed;
2952
	}
2953

2954 2955
	cmd->cmd_complete = generic_cmd_complete;

2956
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2957
	if (err < 0)
2958
		mgmt_pending_remove(cmd);
2959 2960

failed:
2961
	hci_dev_unlock(hdev);
2962 2963 2964
	return err;
}

2965
/* Map an HCI link type plus address type to the corresponding mgmt
 * BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}

/* Handle the Get Connections mgmt command: report the addresses of all
 * mgmt-visible ACL/LE connections (SCO/eSCO links are filtered out).
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3043
				   struct mgmt_cp_pin_code_neg_reply *cp)
3044
{
3045
	struct mgmt_pending_cmd *cmd;
3046 3047
	int err;

3048
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3049
			       sizeof(*cp));
3050 3051 3052
	if (!cmd)
		return -ENOMEM;

3053
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3054
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3055 3056 3057 3058 3059 3060
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

3061
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3062
			  u16 len)
3063
{
3064
	struct hci_conn *conn;
3065
	struct mgmt_cp_pin_code_reply *cp = data;
3066
	struct hci_cp_pin_code_reply reply;
3067
	struct mgmt_pending_cmd *cmd;
3068 3069 3070 3071
	int err;

	BT_DBG("");

3072
	hci_dev_lock(hdev);
3073

3074
	if (!hdev_is_powered(hdev)) {
3075 3076
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
3077 3078 3079
		goto failed;
	}

3080
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3081
	if (!conn) {
3082 3083
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
3084 3085 3086 3087
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3088 3089 3090
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3091 3092 3093

		BT_ERR("PIN code is not 16 bytes long");

3094
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3095
		if (err >= 0)
3096 3097
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);
3098 3099 3100 3101

		goto failed;
	}

3102
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3103 3104
	if (!cmd) {
		err = -ENOMEM;
3105
		goto failed;
3106
	}
3107

3108 3109
	cmd->cmd_complete = addr_cmd_complete;

3110
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3111
	reply.pin_len = cp->pin_len;
3112
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3113 3114 3115

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
3116
		mgmt_pending_remove(cmd);
3117 3118

failed:
3119
	hci_dev_unlock(hdev);
3120 3121 3122
	return err;
}

3123 3124
/* Handle the Set IO Capability mgmt command: store the capability used
 * for future pairings.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3148 3149
{
	struct hci_dev *hdev = conn->hdev;
3150
	struct mgmt_pending_cmd *cmd;
3151

3152
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

3165
/* Finish a Pair Device command: send the response, detach the pairing
 * callbacks and drop the connection references held by the command.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

/* Called by SMP when pairing finishes; completes any pending Pair
 * Device command for the connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

/* Connection callback used for BR/EDR pairing: complete the pending
 * Pair Device command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Connection callback used for LE pairing. Success is reported via
 * SMP (mgmt_smp_complete), so only failures are handled here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3242
		       u16 len)
3243
{
3244
	struct mgmt_cp_pair_device *cp = data;
3245
	struct mgmt_rp_pair_device rp;
3246
	struct mgmt_pending_cmd *cmd;
3247 3248 3249 3250 3251 3252
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

3253 3254 3255 3256
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

3257
	if (!bdaddr_type_is_valid(cp->addr.type))
3258 3259 3260
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3261

3262
	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3263 3264 3265
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3266

3267
	hci_dev_lock(hdev);
3268

3269
	if (!hdev_is_powered(hdev)) {
3270 3271 3272
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
3273 3274 3275
		goto unlock;
	}

3276 3277 3278 3279 3280 3281 3282
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

3283
	sec_level = BT_SECURITY_MEDIUM;
3284
	auth_type = HCI_AT_DEDICATED_BONDING;
3285

3286
	if (cp->addr.type == BDADDR_BREDR) {
3287 3288
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
3289 3290 3291 3292 3293 3294 3295 3296 3297 3298
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309
		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

3310
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3311 3312
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
3313
	}
3314

3315
	if (IS_ERR(conn)) {
3316 3317 3318 3319
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
3320 3321 3322 3323
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
3324 3325 3326
		else
			status = MGMT_STATUS_CONNECT_FAILED;

3327 3328
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
3329 3330 3331 3332
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
3333
		hci_conn_drop(conn);
3334 3335
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3336 3337 3338
		goto unlock;
	}

3339
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3340 3341
	if (!cmd) {
		err = -ENOMEM;
3342
		hci_conn_drop(conn);
3343 3344 3345
		goto unlock;
	}

3346 3347
	cmd->cmd_complete = pairing_complete;

3348
	/* For LE, just connecting isn't a proof that the pairing finished */
3349
	if (cp->addr.type == BDADDR_BREDR) {
3350
		conn->connect_cfm_cb = pairing_complete_cb;
3351 3352 3353 3354 3355 3356 3357
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}
3358

3359
	conn->io_capability = cp->io_cap;
3360
	cmd->user_data = hci_conn_get(conn);
3361

3362
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3363 3364 3365 3366
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}
3367 3368 3369 3370

	err = 0;

unlock:
3371
	hci_dev_unlock(hdev);
3372 3373 3374
	return err;
}

3375 3376
/* Cancel an ongoing Pair Device operation. The pending PAIR_DEVICE
 * command is completed with MGMT_STATUS_CANCELLED and removed.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* Pairing cannot be pending on a powered-off controller */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one the pending pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

3418
/* Common handler for all user pairing responses (PIN code, user
 * confirmation and passkey replies). LE replies are routed through SMP;
 * BR/EDR replies are sent as HCI commands and tracked as pending.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection on the transport matching the address type */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* For LE the reply goes through SMP and completes immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		u8 status;

		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		status = err ? MGMT_STATUS_FAILED : MGMT_STATUS_SUCCESS;
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, status,
					addr, sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

3488 3489 3490 3491 3492 3493 3494
/* Reject a PIN code request from the remote device. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

3500 3501
/* Accept a user confirmation (numeric comparison) request. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command carries no variable-length payload */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

3516
/* Reject a user confirmation (numeric comparison) request. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

3528 3529
/* Provide the passkey requested by the remote device. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

3540
/* Reject a passkey request from the remote device. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

3552
static void update_name(struct hci_request *req)
3553
{
3554
	struct hci_dev *hdev = req->hdev;
3555 3556
	struct hci_cp_write_local_name cp;

3557
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3558

3559
	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3560 3561
}

3562
/* HCI request completion callback for Set Local Name. Resolves the
 * pending mgmt command with the HCI status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3590
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3591
			  u16 len)
3592
{
3593
	struct mgmt_cp_set_local_name *cp = data;
3594
	struct mgmt_pending_cmd *cmd;
3595
	struct hci_request req;
3596 3597 3598 3599
	int err;

	BT_DBG("");

3600
	hci_dev_lock(hdev);
3601

3602 3603 3604 3605 3606 3607
	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
3608 3609
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3610 3611 3612
		goto failed;
	}

3613
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3614

3615
	if (!hdev_is_powered(hdev)) {
3616
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3617

3618 3619
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3620 3621 3622 3623
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3624
				 sk);
3625

3626 3627 3628
		goto failed;
	}

3629
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3630 3631 3632 3633 3634
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3635 3636
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

3637
	hci_req_init(&req, hdev);
3638 3639 3640 3641 3642 3643

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

3644 3645 3646
	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
3647
	if (lmp_le_capable(hdev))
3648
		update_scan_rsp_data(&req);
3649

3650
	err = hci_req_run(&req, set_name_complete);
3651 3652 3653 3654
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
3655
	hci_dev_unlock(hdev);
3656 3657 3658
	return err;
}

3659
/* Read local OOB pairing data from the controller. Uses the extended
 * variant when BR/EDR Secure Connections is enabled.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only a single read may be in flight at any time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3707
/* Store remote OOB pairing data. Supports both the legacy (P-192 only)
 * and the extended (P-192 + P-256) command formats, distinguished by
 * the command length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy format is only defined for BR/EDR */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		status = err < 0 ? MGMT_STATUS_FAILED : MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		status = err < 0 ? MGMT_STATUS_FAILED : MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3814
/* Remove stored remote OOB data for one address, or clear all stored
 * OOB data when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY acts as a wildcard clearing all entries */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	status = err < 0 ? MGMT_STATUS_INVALID_PARAMS : MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

3851
/* Queue the HCI commands needed to start discovery of the requested
 * type. Returns false with *status set to a MGMT error code when
 * discovery cannot be triggered in the current state.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

3948 3949
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery. On success, moves discovery to FINDING and
 * schedules the LE scan disable work when an LE scan is involved.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}

4014
/* Handle the Start Discovery mgmt command. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4079

4080 4081
/* Completion helper for Start Service Discovery: respond with only the
 * first parameter byte (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4086

4087 4088 4089 4090
/* Handle the Start Service Discovery mgmt command: like Start Discovery
 * but with RSSI threshold and UUID-based result filtering.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Validate the UUID count against the 16-bit length field */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

4196
/* HCI request completion callback for Stop Discovery. */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

4216
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4217
			  u16 len)
4218
{
4219
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4220
	struct mgmt_pending_cmd *cmd;
4221
	struct hci_request req;
4222 4223
	int err;

4224
	BT_DBG("%s", hdev->name);
4225

4226
	hci_dev_lock(hdev);
4227

4228
	if (!hci_discovery_active(hdev)) {
4229 4230 4231
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
4232 4233 4234 4235
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
4236 4237 4238
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4239
		goto unlock;
4240 4241
	}

4242
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4243 4244
	if (!cmd) {
		err = -ENOMEM;
4245 4246 4247
		goto unlock;
	}

4248 4249
	cmd->cmd_complete = generic_cmd_complete;

4250 4251
	hci_req_init(&req, hdev);

4252
	hci_stop_discovery(&req);
4253

4254 4255 4256
	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4257
		goto unlock;
4258 4259
	}

4260 4261 4262 4263
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
4264 4265
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4266 4267
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}
4268

4269
unlock:
4270
	hci_dev_unlock(hdev);
4271 4272 4273
	return err;
}

4274
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4275
			u16 len)
4276
{
4277
	struct mgmt_cp_confirm_name *cp = data;
4278 4279 4280
	struct inquiry_entry *e;
	int err;

4281
	BT_DBG("%s", hdev->name);
4282 4283 4284

	hci_dev_lock(hdev);

4285
	if (!hci_discovery_active(hdev)) {
4286 4287 4288
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
4289 4290 4291
		goto failed;
	}

4292
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4293
	if (!e) {
4294 4295 4296
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
4297 4298 4299 4300 4301 4302 4303 4304
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
4305
		hci_inquiry_cache_update_resolve(hdev, e);
4306 4307
	}

4308 4309
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
4310 4311 4312 4313 4314 4315

failed:
	hci_dev_unlock(hdev);
	return err;
}

4316
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4317
			u16 len)
4318
{
4319
	struct mgmt_cp_block_device *cp = data;
4320
	u8 status;
4321 4322
	int err;

4323
	BT_DBG("%s", hdev->name);
4324

4325
	if (!bdaddr_type_is_valid(cp->addr.type))
4326 4327 4328
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4329

4330
	hci_dev_lock(hdev);
4331

4332 4333
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4334
	if (err < 0) {
4335
		status = MGMT_STATUS_FAILED;
4336 4337 4338 4339 4340 4341
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4342

4343
done:
4344 4345
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4346

4347
	hci_dev_unlock(hdev);
4348 4349 4350 4351

	return err;
}

4352
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4353
			  u16 len)
4354
{
4355
	struct mgmt_cp_unblock_device *cp = data;
4356
	u8 status;
4357 4358
	int err;

4359
	BT_DBG("%s", hdev->name);
4360

4361
	if (!bdaddr_type_is_valid(cp->addr.type))
4362 4363 4364
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4365

4366
	hci_dev_lock(hdev);
4367

4368 4369
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4370
	if (err < 0) {
4371
		status = MGMT_STATUS_INVALID_PARAMS;
4372 4373 4374 4375 4376 4377
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4378

4379
done:
4380 4381
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4382

4383
	hci_dev_unlock(hdev);
4384 4385 4386 4387

	return err;
}

4388 4389 4390 4391
/* Handle the Set Device ID mgmt command and refresh the EIR data so
 * the new Device ID record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Only 0x0000-0x0002 are assigned Device ID source values */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

4423 4424
/* HCI request completion callback for Set Advertising. Syncs the
 * HCI_ADVERTISING flag with the controller state and resolves all
 * pending SET_ADVERTISING commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}

4455 4456
/* Handle MGMT_OP_SET_ADVERTISING. val semantics: 0x00 = off,
 * 0x01 = advertising, 0x02 = connectable advertising. When no HCI
 * traffic is needed (powered off, no effective change, LE link active,
 * or active LE scan in progress) only the flags are toggled and the
 * response is sent directly; otherwise an HCI request is queued and
 * completed in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE transaction at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4547 4548 4549 4550 4551 4552 4553 4554
/* Handle MGMT_OP_SET_STATIC_ADDRESS. Only allowed on LE-capable,
 * powered-off controllers. BDADDR_ANY disables the static address;
 * any other value must be a valid static random address (two most
 * significant bits set, not BDADDR_NONE).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4591 4592 4593 4594 4595 4596 4597 4598 4599 4600
/* Handle MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (0x0004-0x4000, window <= interval) and restart
 * a running background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded. The DISCOVERY_STOPPED check avoids disturbing an active
	 * discovery scan.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}

4648 4649
/* HCI request completion callback for Set Fast Connectable. Updates
 * the HCI_FAST_CONNECTABLE flag from the command parameters and answers
 * the pending management command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

4682
/* Handle MGMT_OP_SET_FAST_CONNECTABLE. Requires BR/EDR enabled and at
 * least Bluetooth 1.2 (page scan type/interval commands). When powered,
 * the page scan parameters are written via an HCI request and the flag
 * is toggled in fast_connectable_complete(); otherwise only the flag
 * is changed.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only; HCI parameters are written
	 * during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

4747
/* HCI request completion callback for Set BR/EDR. On failure the
 * HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) is rolled
 * back; on success the pending command is confirmed.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered on is
 * rejected; re-enabling is rejected when a static address or Secure
 * Connections is in use (see comments below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every BR/EDR-only setting. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4891 4892
/* HCI request completion callback for Set Secure Connections. Maps the
 * requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only) onto the
 * HCI_SC_ENABLED / HCI_SC_ONLY flags and answers the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

4936 4937 4938 4939
/* Handle MGMT_OP_SET_SECURE_CONN (0x00 off, 0x01 on, 0x02 SC-only).
 * When the controller side is not involved (powered off, not SC
 * capable, or BR/EDR disabled) only the host flags are toggled;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and the flags are
 * updated in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, Secure Connections builds on SSP. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No effective change: just confirm the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

5024 5025 5026 5027
/* Handle MGMT_OP_SET_DEBUG_KEYS. val 0x00 discards debug keys,
 * 0x01 keeps them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally makes the
 * controller use SSP debug mode (HCI_USE_DEBUG_KEYS).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only the powered + SSP case needs the controller informed. */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5071 5072 5073 5074 5075 5076 5077 5078 5079 5080
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy and store the
 * local IRK. Only allowed while powered off, since the address
 * generation strategy cannot change at runtime.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140
/* Check that an IRK entry carries a usable LE identity address:
 * either a public address, or a static random address (two most
 * significant bits set).
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	if (irk->addr.type == BDADDR_LE_PUBLIC)
		return true;

	if (irk->addr.type != BDADDR_LE_RANDOM)
		return false;

	/* Static random addresses require the two top bits set. */
	return (irk->addr.bdaddr.b[5] & 0xc0) == 0xc0;
}

/* Handle MGMT_OP_LOAD_IRKS: validate the packed IRK list, replace the
 * stored IRKs with it, and enable RPA resolving. The count is bounded
 * so that expected_len cannot overflow u16.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front so the list is replaced
	 * atomically (all or nothing).
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5204 5205 5206 5207
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5221 5222
}

5223
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: validate the packed LTK list and
 * replace the stored keys with it. Unknown key types are silently
 * skipped rather than rejected.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here in the original, so
			 * P256 debug keys fall through to default and are
			 * skipped; presumably intentional (debug keys are
			 * not loaded) — confirm before adding a break.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5315
/* Complete a pending Get Conn Info command: fill RSSI/TX power from the
 * connection on success (or INVALID markers on failure), send the
 * response and drop the references taken in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info the caller sent. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance hci_conn_hold()/hci_conn_get() from get_conn_info(). */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

5342 5343
/* HCI completion callback for the Get Conn Info refresh request
 * (Read RSSI followed optionally by Read TX Power). Resolves the
 * connection handle from the last sent command and completes the
 * matching pending management command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle MGMT_OP_GET_CONN_INFO: return RSSI and TX power for an active
 * connection. Cached values are returned while fresh (randomized age
 * between conn_info_min_age and conn_info_max_age); otherwise the
 * controller is queried and the reply is sent from
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped in conn_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5516
/* Completion handler for a pending Get Clock Info command: build the
 * reply from the clock values cached on hdev/conn and send it to the
 * requesting socket.
 *
 * cmd->user_data holds a referenced hci_conn when a piconet clock was
 * requested for a specific connection, or NULL for local-clock-only
 * requests; the hold and reference taken in get_clock_info() are
 * released here.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Echo back the address the command was issued for */
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	/* On failure, reply with everything but the address zeroed */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance hci_conn_hold()/hci_conn_get() from get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}

5552
/* HCI request callback for the HCI_OP_READ_CLOCK request issued by
 * get_clock_info(). Matches the completed request back to its pending
 * mgmt command (keyed by the connection the clock was read for) and
 * finishes it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Recover the parameters of the READ_CLOCK command that was sent */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was read; look that connection up by its handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was queued with conn as its user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Get Clock Info command handler: read the local clock and, when a
 * peer BR/EDR address is given, that connection's piconet clock. Only
 * BDADDR_BREDR addresses are accepted. The actual values are delivered
 * asynchronously via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection whose piconet
	 * clock is to be read in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First READ_CLOCK: local clock (which == 0x00 via memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the reply is sent; released
		 * in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718
/* Return true if there is an established LE connection to the given
 * address with a matching destination address type.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	return conn && conn->dst_type == type && conn->state == BT_CONNECTED;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* Look up existing parameters or create fresh ones */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is already in effect */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending action list (if any) the entry
	 * is currently on before re-queuing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there isn't one
		 * established already.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730
/* Emit a Device Added mgmt event to all sockets except the sender. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5731
/* HCI request callback for Add Device: finish the pending mgmt
 * command, if one is still queued, with the translated HCI status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

5750 5751 5752 5753
/* Add Device command handler. Actions: 0x00 background scan for the
 * device (LE report), 0x01 allow incoming connection / direct connect,
 * 0x02 auto-connect. For BR/EDR only action 0x01 is supported (the
 * device is put on the whitelist); for LE the action is translated
 * into an auto-connect policy via hci_conn_params_set().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map mgmt action onto the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853
/* Emit a Device Removed mgmt event to all sockets except the sender. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

5854
/* HCI request callback for Remove Device: finish the pending mgmt
 * command, if one is still queued, with the translated HCI status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

5873 5874 5875 5876
/* Remove Device command handler. A specific BR/EDR address is deleted
 * from the whitelist; a specific LE address has its connection
 * parameters removed. BDADDR_ANY with type 0x00 wipes the whole
 * whitelist and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Removal of a single, specific device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal only accepts address type 0x00 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are kept; they carry stored connection
		 * parameters rather than an Add Device registration.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6000 6001 6002 6003
/* Load Connection Parameters command handler: replace the stored LE
 * connection parameters with the entries in this variable-length
 * command. Individual invalid entries are skipped (logged) rather
 * than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count whose total payload still fits in a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared count must match the actual payload length exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop previously stored (disabled) entries before loading */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

6086 6087 6088 6089 6090 6091 6092 6093 6094 6095
/* Set External Configuration command handler: toggle the
 * HCI_EXT_CONFIGURED flag and, when that changes whether the
 * controller counts as configured, move it between the configured and
 * unconfigured index lists. Only allowed while powered off and only on
 * controllers with the EXTERNAL_CONFIG quirk.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* test-and-set/clear so "changed" reflects an actual transition */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flags disagree with the actual configuration state, so the
	 * index has to migrate between the two lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: power on to finish setup */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: re-announce as such */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6142 6143 6144 6145 6146 6147 6148 6149 6150 6151
/* Set Public Address command handler: store the address to be
 * programmed via the driver's set_bdaddr callback. Only allowed while
 * powered off and when the driver provides set_bdaddr. If setting the
 * address completes the controller's configuration, re-register the
 * index as configured and trigger power on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Notify other sockets about the changed missing options */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Migrate from the unconfigured to the configured list */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6194
/* Dispatch table for mgmt commands, indexed by opcode. Each entry is
 * { handler, parameter size, flags }: HCI_MGMT_VAR_LEN marks
 * variable-length commands (the size is a minimum), HCI_MGMT_NO_HDEV
 * commands that take no controller index, and HCI_MGMT_UNCONFIGURED
 * commands that are allowed on an unconfigured controller (see the
 * checks in mgmt_control()).
 * NOTE(review): the read_local_oob_data entry relies on implicit zero
 * flags while all other fixed-size entries spell out the trailing 0.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,                 0 },
	{ set_powered,             MGMT_SETTING_SIZE,                   0 },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE,          0 },
	{ set_connectable,         MGMT_SETTING_SIZE,                   0 },
	{ set_fast_connectable,    MGMT_SETTING_SIZE,                   0 },
	{ set_bondable,            MGMT_SETTING_SIZE,                   0 },
	{ set_link_security,       MGMT_SETTING_SIZE,                   0 },
	{ set_ssp,                 MGMT_SETTING_SIZE,                   0 },
	{ set_hs,                  MGMT_SETTING_SIZE,                   0 },
	{ set_le,                  MGMT_SETTING_SIZE,                   0 },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE,             0 },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE,            0 },
	{ add_uuid,                MGMT_ADD_UUID_SIZE,                  0 },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE,               0 },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE,                0 },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE,           0 },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE,            0 },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE,        0 },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE,         0 },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE,               0 },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE,        0 },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE,             0 },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE,        0 },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE,    0 },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE,        0 },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE,    0 },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE,    0 },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE,           0 },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE,            0 },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE,              0 },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE,              0 },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE,            0 },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE,             0 },
	{ set_advertising,         MGMT_SETTING_SIZE,                   0 },
	{ set_bredr,               MGMT_SETTING_SIZE,                   0 },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE,        0 },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE,           0 },
	{ set_secure_conn,         MGMT_SETTING_SIZE,                   0 },
	{ set_debug_keys,          MGMT_SETTING_SIZE,                   0 },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE,               0 },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE,             0 },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE,            0 },
	{ add_device,              MGMT_ADD_DEVICE_SIZE,                0 },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE,             0 },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
};

6269 6270
/* Entry point for mgmt messages received on a control socket: copy the
 * message in, validate header, opcode, index and parameter length
 * against the channel's handler table, then dispatch to the handler.
 * Returns the number of bytes consumed on success or a negative errno.
 */
int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
		 struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload length */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a user
		 * channel, are not visible over the mgmt interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept commands that are
		 * explicitly marked as such.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged NO_HDEV must come without an index, and all
	 * other commands require one.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands declare a minimum size; fixed-length
	 * commands must match exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6372

6373
/* Announce a newly registered controller index. BR/EDR controllers
 * get the legacy INDEX_ADDED or UNCONF_INDEX_ADDED event; all
 * supported types additionally get the extended index event carrying
 * type (0x00 configured BR/EDR, 0x01 unconfigured BR/EDR, 0x02 AMP)
 * and bus information.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are not exposed through mgmt */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

6405
/* Announce removal of a controller index. All pending mgmt commands
 * for a BR/EDR controller are completed with INVALID_INDEX first, then
 * the legacy and extended removal events are sent (type values as in
 * mgmt_index_added()).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only devices were never announced in the first place */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

6440
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	/* Re-queue every stored LE connection parameter entry onto the
	 * pending list matching its auto-connect policy, then refresh
	 * the background scan accordingly.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	__hci_update_background_scan(req);
}

6468
/* HCI request callback for the power-on sequence built by
 * powered_update_hci(): register SMP on success, answer all pending
 * Set Powered commands and broadcast the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);
}

6495
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after powering on: SSP/SC host support,
 * LE host support, advertising data, auto-connect actions, link
 * security, and the BR/EDR connectable/class/name/EIR state.
 * Returns the hci_req_run() result (e.g. -ENODATA when no commands
 * were queued); powered_complete() runs when the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync authentication enable with the mgmt link security setting */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6566

6567 6568 6569
/* Handle a controller power state change. When powering on,
 * powered_update_hci() queues the HCI commands needed to bring the
 * controller in line with the mgmt settings; a zero return means the
 * request was queued and pending Set Powered commands are answered
 * from its completion callback instead of here. When powering off,
 * all pending mgmt commands are failed and a zeroed class-of-device
 * event is emitted if needed. Returns the new_settings() result.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	/* Only relevant while the management interface is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command with the chosen status */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce that the class of device is cleared while off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6613

6614
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6615
{
6616
	struct mgmt_pending_cmd *cmd;
6617 6618 6619 6620
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
6621
		return;
6622 6623 6624 6625 6626 6627

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

6628
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6629 6630 6631 6632

	mgmt_pending_remove(cmd);
}

6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643
/* Timer callback for the discoverable timeout: drop the discoverable
 * flags, reprogram the controller accordingly and notify userspace.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Keep page scan enabled but drop inquiry scan, so the
		 * controller stays connectable without being discoverable.
		 */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	/* Tell mgmt listeners the Discoverable setting changed */
	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

6664 6665
/* Emit a New Link Key event so userspace can decide whether to
 * persist the BR/EDR link key (store_hint mirrors "persistent").
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = {
		.store_hint = persistent,
		.key.addr.type = BDADDR_BREDR,
		.key.type = key->type,
		.key.pin_len = key->pin_len,
	};

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6680

6681 6682
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
6696 6697 6698 6699

	return MGMT_LTK_UNAUTHENTICATED;
}

6700
/* Emit a New Long Term Key event so userspace can persist the LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the master-role (SMP_LTK) key sets the master flag */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

6738 6739 6740 6741 6742 6743
/* Emit a New IRK event so userspace can persist the identity
 * resolving key together with the current RPA of the device.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

6768 6769
/* Emit a New CSRK event so userspace can persist the signature
 * resolving key when appropriate.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

6798
/* Emit a New Connection Parameter event so userspace can persist
 * the remote's preferred LE connection parameters.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev = {
		.store_hint = store_hint,
		.min_interval = cpu_to_le16(min_interval),
		.max_interval = cpu_to_le16(max_interval),
		.latency = cpu_to_le16(latency),
		.timeout = cpu_to_le16(timeout),
	};

	/* Parameters are only stored against identity addresses;
	 * resolvable/non-resolvable RPAs are not worth reporting.
	 */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829
/* Append one EIR TLV field (length, type, payload) at offset eir_len
 * and return the new total length. The length byte covers the type
 * byte plus the payload.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *p = eir + eir_len;

	*p++ = 1 + data_len;	/* sizeof(type) == 1 */
	*p++ = type;
	memcpy(p, data, data_len);

	return eir_len + 2 + data_len;
}

6830 6831
/* Emit a Device Connected event, carrying cached LE advertising data
 * or, for BR/EDR, the remote name and class of device when known.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Append the class of device unless it is all zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

6867
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6868 6869 6870
{
	struct sock **sk = data;

6871
	cmd->cmd_complete(cmd, 0);
6872 6873 6874 6875

	*sk = cmd->sk;
	sock_hold(*sk);

6876
	mgmt_pending_remove(cmd);
6877 6878
}

6879
/* Per-command callback: emit Device Unpaired for the address in a
 * pending Unpair Device command, then complete and remove it.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	/* Notify all listeners except the command's own socket */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

6890 6891
bool mgmt_powering_down(struct hci_dev *hdev)
{
6892
	struct mgmt_pending_cmd *cmd;
6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905
	struct mgmt_mode *cp;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

6906
/* Emit a Device Disconnected event and complete any matching pending
 * Disconnect / Unpair Device commands. Also finishes a deferred
 * power-off once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if the connection was never reported as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the initiator's socket in sk so the
	 * event below can be suppressed for that socket.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

6942 6943
/* Handle a failed HCI Disconnect: flush pending Unpair Device
 * commands and complete a matching pending Disconnect command with
 * the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command that targeted this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
6967

6968 6969
/* Report a failed outgoing connection attempt to userspace. Also
 * finishes a deferred power-off once the last connection is gone.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6987

6988
/* Ask userspace (via a PIN Code Request event) to supply a PIN for
 * a legacy BR/EDR pairing; "secure" requests a 16-digit PIN.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev = {
		.addr.type = BDADDR_BREDR,
		.secure = secure,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

6999 7000
/* Complete a pending PIN Code Reply command with the HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd =
		mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

7012 7013
/* Complete a pending PIN Code Negative Reply command with the HCI
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd =
		mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7024

7025
/* Ask userspace to confirm a numeric comparison value during SSP/SMP
 * pairing. confirm_hint tells it whether to show a yes/no dialog.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev = {
		.addr.type = link_to_bdaddr(link_type, addr_type),
		.confirm_hint = confirm_hint,
		.value = cpu_to_le32(value),
	};

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7042
/* Ask userspace to enter the passkey shown on the remote device. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev = {
		.addr.type = link_to_bdaddr(link_type, addr_type),
	};

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7056
/* Shared completion for the user confirm / passkey (negative) reply
 * commands: finish the pending command identified by opcode with the
 * translated HCI status. Returns -ENOENT when none is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = mgmt_pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

7072
/* Completion hook for User Confirm Reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

7079
/* Completion hook for User Confirm Negative Reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7086

7087
/* Completion hook for User Passkey Reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

7094
/* Completion hook for User Passkey Negative Reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117
/* Notify userspace of the passkey to display locally; "entered"
 * counts remote keypresses so UIs can show progress.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev = {
		.addr.type = link_to_bdaddr(link_type, addr_type),
		.passkey = __cpu_to_le32(passkey),
		.entered = entered,
	};

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

7118
/* Emit an Authentication Failed event and, when a pairing command is
 * pending for this connection, complete it with the same status. The
 * event deliberately skips the initiator's own socket.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7138

7139
/* Handle completion of HCI Write Auth Enable triggered by a
 * Set Link Security command: sync the mgmt flag with the
 * controller's HCI_AUTH state and respond to pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* "changed" tracks whether the setting actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

7166
static void clear_eir(struct hci_request *req)
7167
{
7168
	struct hci_dev *hdev = req->hdev;
7169 7170
	struct hci_cp_write_eir cp;

7171
	if (!lmp_ext_inq_capable(hdev))
7172
		return;
7173

7174 7175
	memset(hdev->eir, 0, sizeof(hdev->eir));

7176 7177
	memset(&cp, 0, sizeof(cp));

7178
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7179 7180
}

7181
/* Handle completion of HCI Write SSP Mode triggered by a Set SSP
 * command. On failure the optimistically-set SSP (and dependent HS)
 * flags are rolled back; on success the flags are synced and the
 * EIR data updated or cleared to match.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: undo the flag set and announce the
		 * (unchanged) settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP implies disabling High Speed as well */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

7234
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7235 7236 7237 7238 7239 7240 7241 7242 7243
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

7244 7245
/* Handle completion of a class-of-device update (Set Dev Class or
 * Add/Remove UUID). On success a Class Of Dev Changed event is
 * broadcast, skipping the originating socket if one was found.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

7261
/* Handle completion of HCI Write Local Name. Emits a Local Name
 * Changed event unless the write was part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come from mgmt: cache it locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
7287

7288
/* Handle completion of Read Local OOB (Extended) Data. The 256-bit
 * hash/randomizer half of the reply is only included when BR/EDR
 * Secure Connections is enabled and the values were provided.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			        mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Truncate the reply to the 192-bit part only */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				  &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7324

7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336
/* Return true when the 128-bit uuid appears in the uuids list. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 i;

	for (i = 0; i < uuid_count; i++)
		if (memcmp(uuid, uuids[i], 16) == 0)
			return true;

	return false;
}

7337 7338
/* Walk EIR/advertising data and return true when any 16-, 32- or
 * 128-bit service UUID field matches one of the given 128-bit UUIDs.
 * 16/32-bit UUIDs are first expanded against the Bluetooth base UUID.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* covers type byte + payload */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field claiming to extend past the data */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* field type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

7392 7393 7394
/* Queue a delayed LE scan restart, used for controllers whose strict
 * duplicate filtering would otherwise hide RSSI updates.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the delayed restart would land past
	 * the end of the current scan window anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

7407 7408
/* Apply the active service discovery filter (RSSI threshold plus an
 * optional UUID list) to a found device. Returns false when the
 * result must be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

/* Filter a discovery result and forward it to userspace as a
 * Device Found event, appending class-of-device and scan response
 * data when available.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Inject the class of device unless the EIR already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7520

7521 7522
/* Report a resolved remote name to userspace. The name is delivered
 * as a Device Found event whose EIR carries a complete-name field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	char buf[sizeof(struct mgmt_ev_device_found) +
		 HCI_MAX_NAME_LENGTH + 2];
	struct mgmt_ev_device_found *ev = (void *) buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
7543

7544
/* Announce a change of the discovery state (start/stop) together
 * with the discovery type in use.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	BT_DBG("%s discovering %u", hdev->name, discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
7556

7557
/* Request callback for mgmt_reenable_advertising(); log-only. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

/* Re-program advertising when the mgmt Advertising setting is on,
 * e.g. after a connection implicitly stopped advertising.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588

/* The management interface lives on the HCI control channel */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
};

/* Register the management channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the management channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}