/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	9

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
50
	MGMT_OP_SET_BONDABLE,
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
81
	MGMT_OP_SET_DEVICE_ID,
82
	MGMT_OP_SET_ADVERTISING,
83
	MGMT_OP_SET_BREDR,
84
	MGMT_OP_SET_STATIC_ADDRESS,
85
	MGMT_OP_SET_SCAN_PARAMS,
86
	MGMT_OP_SET_SECURE_CONN,
87
	MGMT_OP_SET_DEBUG_KEYS,
88
	MGMT_OP_SET_PRIVACY,
89
	MGMT_OP_LOAD_IRKS,
90
	MGMT_OP_GET_CONN_INFO,
91
	MGMT_OP_GET_CLOCK_INFO,
92 93
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
94
	MGMT_OP_LOAD_CONN_PARAM,
95
	MGMT_OP_READ_UNCONF_INDEX_LIST,
96
	MGMT_OP_READ_CONFIG_INFO,
97
	MGMT_OP_SET_EXTERNAL_CONFIG,
98
	MGMT_OP_SET_PUBLIC_ADDRESS,
99
	MGMT_OP_START_SERVICE_DISCOVERY,
100
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101
	MGMT_OP_READ_EXT_INDEX_LIST,
102
	MGMT_OP_READ_ADV_FEATURES,
103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
126
	MGMT_EV_PASSKEY_NOTIFY,
127
	MGMT_EV_NEW_IRK,
128
	MGMT_EV_NEW_CSRK,
129 130
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
131
	MGMT_EV_NEW_CONN_PARAM,
132
	MGMT_EV_UNCONF_INDEX_ADDED,
133
	MGMT_EV_UNCONF_INDEX_REMOVED,
134
	MGMT_EV_NEW_CONFIG_OPTIONS,
135 136
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
137
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
138 139
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
153
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

218 219
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
220
{
221 222
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
223 224
}

225 226 227 228 229 230 231
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

232 233 234 235 236 237 238
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

239 240 241 242
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
243
			       HCI_SOCK_TRUSTED, skip_sk);
244 245
}

246 247
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
248 249 250 251 252 253
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
254
	rp.revision = cpu_to_le16(MGMT_REVISION);
255

256 257
	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
258 259
}

260 261
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
262 263
{
	struct mgmt_rp_read_commands *rp;
264 265
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
266
	__le16 *opcode;
267 268 269 270 271 272 273 274 275 276 277
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

278 279
	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);
280 281 282 283 284 285 286

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

287 288
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
289 290 291 292 293
	kfree(rp);

	return err;
}

294 295
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
296 297
{
	struct mgmt_rp_read_index_list *rp;
298
	struct hci_dev *d;
299
	size_t rp_len;
300
	u16 count;
301
	int err;
302 303 304 305 306 307

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
308
	list_for_each_entry(d, &hci_dev_list, list) {
309
		if (d->dev_type == HCI_BREDR &&
310
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
311
			count++;
312 313
	}

314 315 316
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
317
		read_unlock(&hci_dev_list_lock);
318
		return -ENOMEM;
319
	}
320

321
	count = 0;
322
	list_for_each_entry(d, &hci_dev_list, list) {
323 324 325
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
326 327
			continue;

328 329 330 331
		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
332 333
			continue;

334
		if (d->dev_type == HCI_BREDR &&
335
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
336 337 338
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
339 340
	}

341 342 343
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

344 345
	read_unlock(&hci_dev_list_lock);

346 347
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);
348

349 350 351
	kfree(rp);

	return err;
352 353
}

354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
370
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
371 372 373 374 375 376 377 378 379 380 381 382
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
383 384 385
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
386 387 388 389 390 391 392 393 394
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
395
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
396 397 398 399 400 401 402 403 404 405
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

406 407
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
408 409 410 411 412 413

	kfree(rp);

	return err;
}

414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

490 491 492
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
493
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
494 495 496 497 498 499 500 501 502
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

503 504 505 506
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

507
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
508
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
509 510
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

511 512 513 514 515 516 517
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

518 519 520 521
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

522 523
	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
524 525
}

526 527 528 529
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

530 531
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
532 533
}

534 535 536 537
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
538
	u32 options = 0;
539 540 541 542 543 544 545

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
546

547 548 549
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

550
	if (hdev->set_bdaddr)
551 552 553 554
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);
555 556 557

	hci_dev_unlock(hdev);

558 559
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
560 561
}

562 563 564 565 566
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
567
	settings |= MGMT_SETTING_BONDABLE;
568
	settings |= MGMT_SETTING_DEBUG_KEYS;
569 570
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;
571

572
	if (lmp_bredr_capable(hdev)) {
573 574
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
575 576
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
577 578 579 580 581

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
582

583
		if (lmp_sc_capable(hdev))
584
			settings |= MGMT_SETTING_SECURE_CONN;
585
	}
586

587
	if (lmp_le_capable(hdev)) {
588
		settings |= MGMT_SETTING_LE;
589
		settings |= MGMT_SETTING_ADVERTISING;
590
		settings |= MGMT_SETTING_SECURE_CONN;
591
		settings |= MGMT_SETTING_PRIVACY;
592
		settings |= MGMT_SETTING_STATIC_ADDRESS;
593
	}
594

595 596
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
597 598
		settings |= MGMT_SETTING_CONFIGURATION;

599 600 601 602 603 604 605
	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

606
	if (hdev_is_powered(hdev))
607 608
		settings |= MGMT_SETTING_POWERED;

609
	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
610 611
		settings |= MGMT_SETTING_CONNECTABLE;

612
	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
613 614
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

615
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
616 617
		settings |= MGMT_SETTING_DISCOVERABLE;

618
	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
619
		settings |= MGMT_SETTING_BONDABLE;
620

621
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
622 623
		settings |= MGMT_SETTING_BREDR;

624
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
625 626
		settings |= MGMT_SETTING_LE;

627
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
628 629
		settings |= MGMT_SETTING_LINK_SECURITY;

630
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
631 632
		settings |= MGMT_SETTING_SSP;

633
	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
634 635
		settings |= MGMT_SETTING_HS;

636
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
637 638
		settings |= MGMT_SETTING_ADVERTISING;

639
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
640 641
		settings |= MGMT_SETTING_SECURE_CONN;

642
	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
643 644
		settings |= MGMT_SETTING_DEBUG_KEYS;

645
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
646 647
		settings |= MGMT_SETTING_PRIVACY;

648 649 650 651 652 653 654 655 656 657 658 659
	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never bet set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
660
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
661
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
662 663 664 665 666
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

667 668 669
	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

780 781 782 783 784 785 786 787 788 789 790 791
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

792 793
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
816 817 818 819 820 821 822 823
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

824
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
825 826 827 828 829 830
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

831 832
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
833 834
		return;

835 836
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;
837 838 839 840 841 842

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

843 844
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
845
	struct mgmt_pending_cmd *cmd;
846 847 848 849

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
850
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
851 852 853 854 855 856 857
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
858
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
859
			return LE_AD_LIMITED;
860
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
861 862 863 864 865 866
			return LE_AD_GENERAL;
	}

	return 0;
}

867
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
868 869 870
{
	u8 ad_len = 0, flags = 0;

871
	flags |= get_adv_discov_flags(hdev);
872

873
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

899
static void update_adv_data(struct hci_request *req)
900 901 902 903 904
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

905
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
906 907 908 909
		return;

	memset(&cp, 0, sizeof(cp));

910
	len = create_adv_data(hdev, cp.data);
911 912 913 914 915 916 917 918 919 920 921 922 923

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

924 925 926 927 928 929 930 931 932 933
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}

934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

957
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
958 959 960 961 962 963 964
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

965 966 967 968 969 970 971 972 973 974 975 976
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

977
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
978
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
979
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
980 981
}

982
static void update_eir(struct hci_request *req)
983
{
984
	struct hci_dev *hdev = req->hdev;
985 986
	struct hci_cp_write_eir cp;

987
	if (!hdev_is_powered(hdev))
988
		return;
989

990
	if (!lmp_ext_inq_capable(hdev))
991
		return;
992

993
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
994
		return;
995

996
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
997
		return;
998 999 1000 1001 1002 1003

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1004
		return;
1005 1006 1007

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

1008
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

1022
static void update_class(struct hci_request *req)
1023
{
1024
	struct hci_dev *hdev = req->hdev;
1025 1026 1027 1028
	u8 cod[3];

	BT_DBG("%s", hdev->name);

1029
	if (!hdev_is_powered(hdev))
1030
		return;
1031

1032
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1033 1034
		return;

1035
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1036
		return;
1037 1038 1039 1040 1041

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

1042
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1043 1044
		cod[1] |= 0x20;

1045
	if (memcmp(cod, hdev->dev_class, 3) == 0)
1046
		return;
1047

1048
	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1049 1050
}

1051
static bool get_connectable(struct hci_dev *hdev)
1052
{
1053
	struct mgmt_pending_cmd *cmd;
1054 1055 1056 1057

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
1058
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1059 1060
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
1061
		return cp->val;
1062 1063
	}

1064
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1065 1066
}

1067 1068 1069 1070 1071 1072 1073
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1074 1075 1076 1077
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
1078
	u8 own_addr_type, enable = 0x01;
1079
	bool connectable;
1080

1081 1082 1083
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

1084
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1085 1086
		disable_advertising(req);

1087
	/* Clear the HCI_LE_ADV bit temporarily so that the
1088 1089 1090 1091
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
1092
	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1093

1094
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1095 1096 1097
		connectable = true;
	else
		connectable = get_connectable(hdev);
1098

1099 1100 1101 1102 1103
	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1104 1105
		return;

1106
	memset(&cp, 0, sizeof(cp));
1107 1108
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1109
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1110
	cp.own_address_type = own_addr_type;
1111 1112 1113 1114 1115 1116 1117
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1118 1119 1120
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
1121
					    service_cache.work);
1122
	struct hci_request req;
1123

1124
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1125 1126
		return;

1127 1128
	hci_req_init(&req, hdev);

1129 1130
	hci_dev_lock(hdev);

1131 1132
	update_eir(&req);
	update_class(&req);
1133 1134

	hci_dev_unlock(hdev);
1135 1136

	hci_req_run(&req, NULL);
1137 1138
}

1139 1140 1141 1142 1143 1144 1145 1146
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

1147
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1148

1149
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

1160
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1161
{
1162
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1163 1164
		return;

1165
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1166
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1167

1168 1169 1170 1171 1172
	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
1173
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1174 1175
}

1176
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1177
				void *data, u16 data_len)
1178
{
1179
	struct mgmt_rp_read_info rp;
1180

1181
	BT_DBG("sock %p %s", sk, hdev->name);
1182

1183
	hci_dev_lock(hdev);
1184

1185 1186
	memset(&rp, 0, sizeof(rp));

1187
	bacpy(&rp.bdaddr, &hdev->bdaddr);
1188

1189
	rp.version = hdev->hci_ver;
1190
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1191 1192 1193

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1194

1195
	memcpy(rp.dev_class, hdev->dev_class, 3);
1196

1197
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1198
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1199

1200
	hci_dev_unlock(hdev);
1201

1202 1203
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
1204 1205
}

1206
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1207
{
1208
	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1209

1210 1211
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
1212 1213
}

1214
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1215 1216 1217
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

1218 1219
	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
1220
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1221
	}
1222 1223
}

1224
static bool hci_stop_discovery(struct hci_request *req)
1225 1226 1227 1228 1229 1230 1231
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
1232
		if (test_bit(HCI_INQUIRY, &hdev->flags))
1233
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1234 1235

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1236 1237 1238 1239
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

1240
		return true;
1241 1242 1243 1244 1245

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
1246
			break;
1247 1248 1249 1250 1251

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

1252
		return true;
1253 1254 1255

	default:
		/* Passive scanning */
1256
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1257
			hci_req_add_le_scan_disable(req);
1258 1259 1260
			return true;
		}

1261 1262
		break;
	}
1263 1264

	return false;
1265 1266
}

1267 1268 1269 1270
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
1271 1272
	bool discov_stopped;
	int err;
1273 1274 1275 1276 1277 1278 1279 1280 1281

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

1282
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1283 1284
		disable_advertising(&req);

1285
	discov_stopped = hci_stop_discovery(&req);
1286 1287 1288

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
1317 1318
	}

1319 1320 1321 1322 1323
	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
1324 1325
}

1326
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1327
		       u16 len)
1328
{
1329
	struct mgmt_mode *cp = data;
1330
	struct mgmt_pending_cmd *cmd;
1331
	int err;
1332

1333
	BT_DBG("request for %s", hdev->name);
1334

1335
	if (cp->val != 0x00 && cp->val != 0x01)
1336 1337
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);
1338

1339
	hci_dev_lock(hdev);
1340

1341
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1342 1343
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
1344 1345 1346
		goto failed;
	}

1347
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1348 1349 1350
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
1351 1352 1353
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
1354 1355 1356 1357
			goto failed;
		}
	}

1358
	if (!!cp->val == hdev_is_powered(hdev)) {
1359
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1360 1361 1362
		goto failed;
	}

1363
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1364 1365
	if (!cmd) {
		err = -ENOMEM;
1366
		goto failed;
1367
	}
1368

1369
	if (cp->val) {
1370
		queue_work(hdev->req_workqueue, &hdev->power_on);
1371 1372 1373 1374
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
1375 1376 1377
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);
1378

1379 1380
		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
1381
			cancel_delayed_work(&hdev->power_off);
1382 1383 1384 1385
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}
1386 1387

failed:
1388
	hci_dev_unlock(hdev);
1389
	return err;
1390 1391
}

1392 1393
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
1394
	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1395

1396 1397
	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
1398 1399
}

1400 1401 1402 1403 1404
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1405 1406 1407 1408 1409 1410
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

1411
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

1427
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1428 1429 1430
{
	u8 *status = data;

1431
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1432 1433 1434
	mgmt_pending_remove(cmd);
}

1435
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1449
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1450
{
1451 1452
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
1453 1454
}

1455
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1456
{
1457 1458
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
1459 1460
}

1461 1462 1463 1464
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1465
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1466 1467 1468 1469 1470 1471 1472 1473 1474
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1475
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1476 1477 1478 1479 1480
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1481 1482
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
1483
{
1484
	struct mgmt_pending_cmd *cmd;
1485
	struct mgmt_mode *cp;
1486
	struct hci_request req;
1487 1488 1489 1490 1491 1492
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

1493
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1494 1495 1496 1497 1498
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
1499
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1500
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1501 1502 1503 1504
		goto remove_cmd;
	}

	cp = cmd->param;
1505
	if (cp->val) {
1506
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1507 1508 1509 1510 1511 1512 1513

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
1514
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1515
	}
1516 1517 1518 1519 1520 1521

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

1522 1523
	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
1524 1525
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
1526 1527
	 */
	hci_req_init(&req, hdev);
1528
	__hci_update_page_scan(&req);
1529 1530 1531
	update_class(&req);
	hci_req_run(&req, NULL);

1532 1533 1534 1535 1536 1537 1538
remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1539
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1540
			    u16 len)
1541
{
1542
	struct mgmt_cp_set_discoverable *cp = data;
1543
	struct mgmt_pending_cmd *cmd;
1544
	struct hci_request req;
1545
	u16 timeout;
1546
	u8 scan;
1547 1548
	int err;

1549
	BT_DBG("request for %s", hdev->name);
1550

1551 1552
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1553 1554
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);
1555

1556
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1557 1558
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1559

1560
	timeout = __le16_to_cpu(cp->timeout);
1561 1562 1563 1564 1565 1566

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
1567 1568
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1569

1570
	hci_dev_lock(hdev);
1571

1572
	if (!hdev_is_powered(hdev) && timeout > 0) {
1573 1574
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
1575 1576 1577
		goto failed;
	}

1578 1579
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1580 1581
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
1582 1583 1584
		goto failed;
	}

1585
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1586 1587
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
1588 1589 1590 1591
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
1592 1593
		bool changed = false;

1594 1595 1596 1597
		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
1598
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1599
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1600 1601 1602
			changed = true;
		}

1603
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1604 1605 1606 1607 1608 1609
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

1610 1611 1612
		goto failed;
	}

1613 1614 1615 1616
	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
1617 1618 1619
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
1620 1621
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;
1622

1623 1624
		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1625
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1626
					   to);
1627 1628
		}

1629
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1630 1631 1632
		goto failed;
	}

1633
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1634 1635
	if (!cmd) {
		err = -ENOMEM;
1636
		goto failed;
1637
	}
1638

1639 1640 1641 1642 1643 1644 1645
	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

1646 1647
	/* Limited discoverable mode */
	if (cp->val == 0x02)
1648
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1649
	else
1650
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1651

1652 1653
	hci_req_init(&req, hdev);

1654 1655 1656
	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
1657
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1658 1659
		goto update_ad;

1660 1661
	scan = SCAN_PAGE;

1662 1663 1664 1665 1666
	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
1667
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

1685
		scan |= SCAN_INQUIRY;
1686
	} else {
1687
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1688
	}
1689

1690
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1691

1692 1693 1694
update_ad:
	update_adv_data(&req);

1695
	err = hci_req_run(&req, set_discoverable_complete);
1696
	if (err < 0)
1697
		mgmt_pending_remove(cmd);
1698 1699

failed:
1700
	hci_dev_unlock(hdev);
1701 1702 1703
	return err;
}

1704 1705
static void write_fast_connectable(struct hci_request *req, bool enable)
{
1706
	struct hci_dev *hdev = req->hdev;
1707 1708 1709
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

1710
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1711 1712
		return;

1713 1714 1715
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

1716 1717 1718 1719
	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
1720
		acp.interval = cpu_to_le16(0x0100);
1721 1722 1723 1724
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
1725
		acp.interval = cpu_to_le16(0x0800);
1726 1727
	}

1728
	acp.window = cpu_to_le16(0x0012);
1729

1730 1731 1732 1733 1734 1735 1736
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1737 1738
}

1739 1740
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
1741
{
1742
	struct mgmt_pending_cmd *cmd;
1743
	struct mgmt_mode *cp;
1744
	bool conn_changed, discov_changed;
1745 1746 1747 1748 1749

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

1750
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1751 1752 1753
	if (!cmd)
		goto unlock;

1754 1755
	if (status) {
		u8 mgmt_err = mgmt_status(status);
1756
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1757 1758 1759
		goto remove_cmd;
	}

1760
	cp = cmd->param;
1761
	if (cp->val) {
1762 1763
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
1764 1765
		discov_changed = false;
	} else {
1766 1767 1768 1769
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
1770
	}
1771

1772 1773
	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

1774
	if (conn_changed || discov_changed) {
1775
		new_settings(hdev, cmd->sk);
1776
		hci_update_page_scan(hdev);
1777 1778
		if (discov_changed)
			mgmt_update_adv_data(hdev);
1779 1780
		hci_update_background_scan(hdev);
	}
1781

1782
remove_cmd:
1783 1784 1785 1786 1787 1788
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1789 1790 1791 1792 1793 1794
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

1795
	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1796 1797 1798
		changed = true;

	if (val) {
1799
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1800
	} else {
1801 1802
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1803 1804 1805 1806 1807 1808
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

1809
	if (changed) {
1810
		hci_update_page_scan(hdev);
1811
		hci_update_background_scan(hdev);
1812
		return new_settings(hdev, sk);
1813
	}
1814 1815 1816 1817

	return 0;
}

1818
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1819
			   u16 len)
1820
{
1821
	struct mgmt_mode *cp = data;
1822
	struct mgmt_pending_cmd *cmd;
1823
	struct hci_request req;
1824
	u8 scan;
1825 1826
	int err;

1827
	BT_DBG("request for %s", hdev->name);
1828

1829 1830
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1831 1832
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);
1833

1834
	if (cp->val != 0x00 && cp->val != 0x01)
1835 1836
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1837

1838
	hci_dev_lock(hdev);
1839

1840
	if (!hdev_is_powered(hdev)) {
1841
		err = set_connectable_update_settings(hdev, sk, cp->val);
1842 1843 1844
		goto failed;
	}

1845 1846
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1847 1848
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
1849 1850 1851
		goto failed;
	}

1852
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1853 1854
	if (!cmd) {
		err = -ENOMEM;
1855
		goto failed;
1856
	}
1857

1858
	hci_req_init(&req, hdev);
1859

1860 1861 1862 1863
	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
1864
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1865
		if (!cp->val) {
1866 1867
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1868 1869 1870
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1871 1872 1873
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;
1886 1887

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1888
			    hdev->discov_timeout > 0)
1889 1890
				cancel_delayed_work(&hdev->discov_off);
		}
1891

1892 1893
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
1894

1895
no_scan_update:
1896
	/* Update the advertising parameters if necessary */
1897
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1898 1899
		enable_advertising(&req);

1900
	err = hci_req_run(&req, set_connectable_complete);
1901
	if (err < 0) {
1902
		mgmt_pending_remove(cmd);
1903
		if (err == -ENODATA)
1904 1905
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
1906 1907
		goto failed;
	}
1908 1909

failed:
1910
	hci_dev_unlock(hdev);
1911 1912 1913
	return err;
}

1914
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1915
			u16 len)
1916
{
1917
	struct mgmt_mode *cp = data;
1918
	bool changed;
1919 1920
	int err;

1921
	BT_DBG("request for %s", hdev->name);
1922

1923
	if (cp->val != 0x00 && cp->val != 0x01)
1924 1925
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1926

1927
	hci_dev_lock(hdev);
1928 1929

	if (cp->val)
1930
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1931
	else
1932
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1933

1934
	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1935
	if (err < 0)
1936
		goto unlock;
1937

1938 1939
	if (changed)
		err = new_settings(hdev, sk);
1940

1941
unlock:
1942
	hci_dev_unlock(hdev);
1943 1944 1945
	return err;
}

1946 1947
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
1948 1949
{
	struct mgmt_mode *cp = data;
1950
	struct mgmt_pending_cmd *cmd;
1951
	u8 val, status;
1952 1953
	int err;

1954
	BT_DBG("request for %s", hdev->name);
1955

1956 1957
	status = mgmt_bredr_support(hdev);
	if (status)
1958 1959
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);
1960

1961
	if (cp->val != 0x00 && cp->val != 0x01)
1962 1963
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);
1964

1965 1966
	hci_dev_lock(hdev);

1967
	if (!hdev_is_powered(hdev)) {
1968 1969
		bool changed = false;

1970
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1971
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1972 1973 1974 1975 1976 1977 1978 1979 1980 1981
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

1982 1983 1984
		goto failed;
	}

1985
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1986 1987
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2015
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2016 2017
{
	struct mgmt_mode *cp = data;
2018
	struct mgmt_pending_cmd *cmd;
2019
	u8 status;
2020 2021
	int err;

2022
	BT_DBG("request for %s", hdev->name);
2023

2024 2025
	status = mgmt_bredr_support(hdev);
	if (status)
2026
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2027

2028
	if (!lmp_ssp_capable(hdev))
2029 2030
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);
2031

2032
	if (cp->val != 0x00 && cp->val != 0x01)
2033 2034
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);
2035

2036
	hci_dev_lock(hdev);
2037

2038
	if (!hdev_is_powered(hdev)) {
2039
		bool changed;
2040

2041
		if (cp->val) {
2042 2043
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
2044
		} else {
2045 2046
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
2047
			if (!changed)
2048 2049
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
2050
			else
2051
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2052 2053 2054 2055 2056 2057 2058 2059 2060
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

2061 2062 2063
		goto failed;
	}

2064
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2065 2066
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
2067 2068 2069
		goto failed;
	}

2070
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2071 2072 2073 2074 2075 2076 2077 2078 2079 2080
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

2081
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2082 2083 2084
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

2085
	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2086 2087 2088 2089 2090 2091 2092 2093 2094 2095
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2096
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2097 2098
{
	struct mgmt_mode *cp = data;
2099
	bool changed;
2100
	u8 status;
2101
	int err;
2102

2103
	BT_DBG("request for %s", hdev->name);
2104

2105 2106
	status = mgmt_bredr_support(hdev);
	if (status)
2107
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2108

2109
	if (!lmp_ssp_capable(hdev))
2110 2111
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
2112

2113
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2114 2115
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);
2116

2117
	if (cp->val != 0x00 && cp->val != 0x01)
2118 2119
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);
2120

2121 2122
	hci_dev_lock(hdev);

2123
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2124 2125
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
2126 2127 2128
		goto unlock;
	}

2129
	if (cp->val) {
2130
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2131 2132
	} else {
		if (hdev_is_powered(hdev)) {
2133 2134
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
2135 2136 2137
			goto unlock;
		}

2138
		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2139
	}
2140 2141 2142 2143 2144 2145 2146

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);
2147

2148 2149 2150
unlock:
	hci_dev_unlock(hdev);
	return err;
2151 2152
}

2153
/* Completion callback for the HCI request issued by set_le(). Responds
 * to all pending Set LE commands and, on success, refreshes the LE
 * advertising/scan-response data and background scan state.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the HCI error. */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}

2193
/* Set Low Energy command handler: enables or disables LE host support
 * on the controller and keeps the HCI_LE_ENABLED (and, when disabling,
 * HCI_ADVERTISING) flags in sync.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or when the controller already matches the
	 * requested state, only the flags change and no HCI traffic is
	 * generated.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly turns advertising off too. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE-state-changing operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before switching LE support off. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2294 2295 2296 2297 2298 2299 2300 2301
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
2302
	struct mgmt_pending_cmd *cmd;
2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. The first 12 bytes are compared against in
 * get_uuid_size() to detect 16/32-bit shortened UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the size in bits (16, 32 or 128) of the given 16-byte UUID. */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 short_part;

	/* UUIDs not derived from the Bluetooth Base UUID are full
	 * 128-bit values.
	 */
	if (memcmp(uuid, bluetooth_base_uuid, 12) != 0)
		return 128;

	/* For base-derived UUIDs the value in bytes 12-15 decides
	 * whether it fits in 16 bits.
	 */
	short_part = get_unaligned_le32(&uuid[12]);
	return (short_part > 0xffff) ? 32 : 16;
}

2336 2337
/* Complete a pending CoD/EIR related mgmt command (identified by
 * mgmt_op) with the given HCI status and the current device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (cmd) {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

2355
/* HCI request callback for add_uuid(): completes the mgmt command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

2362
/* Add UUID command handler: stores the given service UUID and updates
 * the Class of Device and EIR data on the controller to reflect it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were needed; complete
		 * the mgmt command immediately.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The command completes later from add_uuid_complete(). */
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

2420 2421 2422 2423 2424
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2425
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2426 2427
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2428 2429 2430 2431 2432 2433
		return true;
	}

	return false;
}

2434
/* HCI request callback for remove_uuid(): completes the mgmt command. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2441
/* Remove UUID command handler: deletes the given service UUID (or all
 * UUIDs when the all-zero wildcard is passed) and refreshes the Class
 * of Device and EIR data on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID acts as a wildcard and clears the whole list. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			/* Cache armed: controller update is deferred to
			 * the delayed service_cache work.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA means no HCI commands were needed; complete
		 * the mgmt command immediately.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The command completes later from remove_uuid_complete(). */
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2520
/* HCI request callback for set_dev_class(): completes the mgmt command. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2527
/* Set Device Class command handler: updates the major/minor device
 * class and, when powered, pushes the new Class of Device (and EIR if
 * the service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * reserved in the Class of Device encoding and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the device lock while synchronously cancelling
		 * the cache flush work to avoid deadlocking against it.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA means no HCI commands were needed; complete
		 * the mgmt command immediately.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The command completes later from set_class_complete(). */
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2598
/* Load Link Keys command handler: replaces all stored BR/EDR link keys
 * with the supplied list and updates the keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key count whose total payload still fits in the 16-bit
	 * mgmt length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the stored keys. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

2680
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2681
			   u8 addr_type, struct sock *skip_sk)
2682 2683 2684 2685 2686 2687 2688
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2689
			  skip_sk);
2690 2691
}

2692
/* Unpair Device command handler: removes all stored keys (link key, or
 * IRK/LTK for LE) for a device and optionally terminates an existing
 * connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* Key removal failing means there was nothing to unpair. */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2807
/* Disconnect command handler: terminates the BR/EDR or LE connection
 * to the given remote address.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

2872
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}

2891 2892
/* Get Connections command handler: returns the addresses of all
 * currently mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO entries are not
	 * counted (the slot is overwritten by the next connection).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2949
/* Queue a PIN Code Negative Reply to the controller on behalf of the
 * given socket; the pending mgmt command is completed later from the
 * corresponding HCI event.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

2968
/* PIN Code Reply command handler: forwards the user-supplied PIN code
 * for a BR/EDR pairing to the controller. A full 16-byte PIN is
 * enforced when the pending security level is high.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject the PIN towards the controller, then fail the
		 * mgmt command itself.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3030 3031
/* Set IO Capability command handler: stores the IO capability used for
 * subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *param = data;

	BT_DBG("");

	if (param->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);
	hdev->io_capability = param->io_capability;
	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

3054
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3055 3056
{
	struct hci_dev *hdev = conn->hdev;
3057
	struct mgmt_pending_cmd *cmd;
3058

3059
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

3072
/* Finish a pending Pair Device command: send the mgmt response, detach
 * the pairing callbacks from the connection and release the references
 * taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

3101 3102 3103
/* Called when SMP pairing finishes; completes the matching pending
 * Pair Device command with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}

3113 3114
/* BR/EDR connection callback used while pairing: completes the pending
 * Pair Device command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

3129
/* LE connection callback used while pairing: only failures complete
 * the command here; success is reported via mgmt_smp_complete() once
 * SMP has actually finished.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

3148
/* Pair Device command handler: initiates a BR/EDR or LE connection to
 * the given address and drives pairing on it. The mgmt command is
 * completed from the pairing callbacks once security is established.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status. */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A callback already set means another pairing is using this
	 * connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete right away. */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3282 3283
/* Cancel an in-progress Pair Device operation.
 *
 * Looks up the pending MGMT_OP_PAIR_DEVICE command, verifies that the
 * supplied address matches the connection being paired, and completes
 * the pending command with MGMT_STATUS_CANCELLED before acknowledging
 * the cancel request itself.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There can be at most one pending pair operation per adapter. */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must refer to the same peer the pending pair targets. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending pair command as cancelled, then ack the
	 * cancel command with the address echoed back.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3325
/* Common handler for all user pairing responses (PIN code, user
 * confirmation and passkey replies, both positive and negative).
 *
 * Validates that the adapter is powered and that a connection to the
 * given address exists, then either hands the reply to SMP (for LE
 * addresses, answered synchronously) or queues the corresponding HCI
 * command @hci_op (for BR/EDR) with a pending mgmt command tracking it.
 *
 * @passkey is only meaningful for HCI_OP_USER_PASSKEY_REPLY; all other
 * callers pass 0.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Pick the transport matching the address type: BR/EDR maps to
	 * the ACL link, everything else to the LE link.
	 */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go to SMP directly and are answered
	 * synchronously, so no pending command needs to be queued.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	/* If the HCI command could not even be queued, drop the pending
	 * entry; otherwise the HCI event handler will complete it.
	 */
	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

3395 3396 3397 3398 3399 3400 3401
/* Reject a PIN code request from the remote device. A negative reply
 * carries no passkey, hence the trailing 0.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	/* Delegate to the common user-pairing response helper. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

3407 3408
/* Positive reply to a user-confirmation (numeric comparison) request. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* The confirm reply command has a fixed size; anything else is
	 * a malformed request.
	 */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

3423
/* Negative reply to a user-confirmation (numeric comparison) request. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	/* No passkey applies to a rejection, hence the trailing 0. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

3435 3436
/* Positive reply to a passkey-entry request, forwarding the passkey
 * supplied by user space.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

3447
/* Negative reply to a passkey-entry request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	/* No passkey applies to a rejection, hence the trailing 0. */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

3459
/* Queue an HCI Write Local Name command carrying the adapter's current
 * device name onto @req. The caller is responsible for running the
 * request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

3469
/* HCI request completion callback for Set Local Name: reports the
 * outcome to the socket that issued the pending mgmt command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed). */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	else
		/* Echo the requested name back on success. */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3497
/* Handle the Set Local Name mgmt command.
 *
 * Updates the adapter's long and short names. When the adapter is
 * powered off the names are only stored and the change is announced
 * immediately; when powered on the new name is additionally written
 * to the controller (local name + EIR for BR/EDR, scan response data
 * for LE) and completion is reported from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never reaches the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets (not the sender) of the change. */
		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3566
/* Handle the Read Local OOB Data mgmt command.
 *
 * Requests the controller's local out-of-band pairing data. Uses the
 * extended variant (P-192 + P-256 values) when BR/EDR Secure
 * Connections is enabled. The result is delivered asynchronously via
 * the corresponding HCI command-complete handling of the pending
 * command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding OOB read per adapter is allowed. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3614
/* Handle the Add Remote OOB Data mgmt command.
 *
 * Two command sizes are accepted: the legacy form with only P-192
 * hash/randomizer (BR/EDR only) and the extended form that also
 * carries P-256 values. Zero-valued key material in the extended form
 * disables OOB data for the corresponding curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is BR/EDR specific. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3721
/* Handle the Remove Remote OOB Data mgmt command.
 *
 * BDADDR_ANY clears all stored remote OOB data; otherwise only the
 * entry for the given address is removed.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* The wildcard address clears the whole OOB data store. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	/* A lookup failure means no entry existed for this address. */
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

3758
/* Queue a BR/EDR general inquiry on @req.
 *
 * Returns true when the HCI Inquiry command was added; returns false
 * with *status set to the mgmt error code when BR/EDR is unsupported
 * or an inquiry is already running.
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	/* Start from a clean cache so stale results aren't reported. */
	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}

/* Queue the HCI commands that start an active LE scan with the given
 * scan @interval on @req.
 *
 * Handles the necessary preconditions: stops advertising (unless a
 * directed-advertising connection attempt is in flight, which is
 * rejected instead), temporarily disables any background scan, and
 * picks an appropriate own address type.
 *
 * Returns true when scanning was queued; returns false with *status
 * set to the mgmt error code otherwise.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
3844

3845 3846 3847
/* Queue the HCI commands for the currently configured discovery type
 * (hdev->discovery.type) on @req.
 *
 * Returns true when the request was populated; returns false with
 * *status set to the mgmt error code otherwise.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* Without the simultaneous-discovery quirk, interleaved
		 * discovery starts with the LE scan only.
		 */
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

3891 3892
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery.
 *
 * Completes the pending mgmt command, updates the discovery state and,
 * for LE-involving scans, schedules the delayed work that will stop
 * the scan after the proper timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two start-discovery variants may be pending. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates by itself; no LE scan to stop. */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}

3968
/* Handle the Start Discovery mgmt command.
 *
 * Validates the adapter and discovery state, records the requested
 * discovery type and kicks off the HCI request built by
 * trigger_discovery(); completion is reported asynchronously from
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while periodic
	 * inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4033

4034 4035
/* Completion callback for Start Service Discovery: replies with only
 * the first byte of the stored parameters (presumably the discovery
 * type — matches what start_service_discovery() stores first; verify
 * against struct mgmt_cp_start_service_discovery).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4040

4041 4042 4043 4044
/* Handle the Start Service Discovery mgmt command.
 *
 * Like start_discovery() but additionally validates and stores the
 * result filter (RSSI threshold and optional UUID list) that will be
 * applied to discovered devices.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Guard the 16-byte-per-UUID length math below against u16
	 * overflow before checking it against the actual command length.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

4150
/* HCI request completion callback for Stop Discovery: completes the
 * pending mgmt command and, on success, marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

4170
/* Handle the Stop Discovery mgmt command.
 *
 * The requested type must match the type of the discovery currently
 * running. Stopping is asynchronous via hci_stop_discovery(); if that
 * produced no HCI commands (-ENODATA) the command is completed
 * immediately instead.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4228
/* Handle the Confirm Name mgmt command.
 *
 * User space tells us whether the name of a discovered device is
 * already known. Known names are removed from the name-resolve list;
 * unknown ones are marked as needing resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense during active discovery. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		/* Reposition the entry in the resolve list. */
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

4270
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4271
			u16 len)
4272
{
4273
	struct mgmt_cp_block_device *cp = data;
4274
	u8 status;
4275 4276
	int err;

4277
	BT_DBG("%s", hdev->name);
4278

4279
	if (!bdaddr_type_is_valid(cp->addr.type))
4280 4281 4282
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4283

4284
	hci_dev_lock(hdev);
4285

4286 4287
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4288
	if (err < 0) {
4289
		status = MGMT_STATUS_FAILED;
4290 4291 4292 4293 4294 4295
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4296

4297
done:
4298 4299
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4300

4301
	hci_dev_unlock(hdev);
4302 4303 4304 4305

	return err;
}

4306
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4307
			  u16 len)
4308
{
4309
	struct mgmt_cp_unblock_device *cp = data;
4310
	u8 status;
4311 4312
	int err;

4313
	BT_DBG("%s", hdev->name);
4314

4315
	if (!bdaddr_type_is_valid(cp->addr.type))
4316 4317 4318
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4319

4320
	hci_dev_lock(hdev);
4321

4322 4323
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4324
	if (err < 0) {
4325
		status = MGMT_STATUS_INVALID_PARAMS;
4326 4327 4328 4329 4330 4331
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4332

4333
done:
4334 4335
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4336

4337
	hci_dev_unlock(hdev);
4338 4339 4340 4341

	return err;
}

4342 4343 4344 4345
/* Handle the Set Device ID mgmt command: store the Device ID record
 * and refresh the EIR data that carries it for BR/EDR.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source;
	int err;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Only source values up to 0x0002 are accepted (presumably the
	 * Device ID profile's assigner values — verify against the DI
	 * specification).
	 */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR data; the request runs
	 * without a completion callback.
	 */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

4377 4378
/* HCI request completion handler for Set Advertising: on failure answer
 * all pending Set Advertising commands with an error status; on success
 * sync the mgmt-visible HCI_ADVERTISING flag with the controller state
 * (HCI_LE_ADV) and send the settings response plus New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the last responded socket */
	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}

4409 4410
/* Set Advertising command handler. cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable. When no HCI traffic is needed (powered
 * off, no state change, LE connections active, or active scanning)
 * only the flags are toggled and a response is sent directly;
 * otherwise the request is queued and completed asynchronously in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE state change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4501 4502 4503 4504 4505 4506 4507 4508
/* Set Static Address command handler: record the LE static random
 * address. Only allowed on LE-capable controllers while powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address may only be changed while the controller is down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY disables the static address. Any other value must
	 * be a valid static random address: not BDADDR_NONE, and with
	 * the two most significant bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}

4545 4546 4547 4548 4549 4550 4551 4552 4553 4554
/* Set Scan Parameters command handler: store the LE scan interval and
 * window and, if a background scan is currently running, restart it so
 * the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval = __le16_to_cpu(cp->interval);
	__u16 window = __le16_to_cpu(cp->window);
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Both values are limited to 0x0004-0x4000 (0.625 ms units) and
	 * the scan window must not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}

4602 4603
/* HCI request completion handler for Set Fast Connectable: on success
 * update the HCI_FAST_CONNECTABLE flag according to the requested mode
 * and send the settings response plus New Settings event; on failure
 * report the error status to the pending command's socket.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stashed in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

4636
/* Set Fast Connectable command handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2 (page scan interval control).
 * When powered, the page scan parameters are written via an HCI request
 * completed in fast_connectable_complete(); when powered off only the
 * flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No state change: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

4701
/* HCI request completion handler for Set BR/EDR: on failure roll back
 * the HCI_BREDR_ENABLED flag (it was set optimistically before the
 * request ran) and report the error; on success send the settings
 * response and New Settings event.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Set BR/EDR command handler for dual-mode controllers. Disabling
 * BR/EDR while powered is rejected, as is re-enabling it when the
 * controller operates LE-only with a static address or with secure
 * connections enabled. The asynchronous path is completed in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4845 4846
/* HCI request completion handler for Set Secure Connections: update
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags according to the mode stored
 * in the pending command (0x00 = off, 0x01 = on, 0x02 = SC only) and
 * answer the command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

4890 4891 4892 4893
/* Set Secure Connections command handler. cp->val: 0x00 = off,
 * 0x01 = on, 0x02 = SC only mode. When the controller is powered off,
 * not SC capable, or BR/EDR is disabled, only the host flags are
 * toggled; otherwise HCI_OP_WRITE_SC_SUPPORT is sent and the command
 * completes in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR capable SC controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI traffic possible or needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No state change: just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

4978 4979 4980 4981
/* Set Debug Keys command handler. cp->val: 0x00 = discard debug keys,
 * 0x01 = keep debug keys, 0x02 = keep debug keys and also switch the
 * controller into SSP debug mode so it generates them.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both keep debug keys around */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 enables active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the SSP debug mode with the controller when powered */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5025 5026 5027 5028 5029 5030 5031 5032 5033 5034
/* Set Privacy command handler: enable or disable LE privacy and store
 * the Identity Resolving Key supplied by user space. Only allowed
 * while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094
/* Validate the address of an IRK entry supplied by user space. Public
 * LE addresses are always acceptable; random LE addresses must be
 * static random (two most significant bits set). Any other address
 * type is invalid.
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	if (irk->addr.type == BDADDR_LE_PUBLIC)
		return true;

	if (irk->addr.type != BDADDR_LE_RANDOM)
		return false;

	/* Static random address: two most significant bits must be set */
	return (irk->addr.bdaddr.b[5] & 0xc0) == 0xc0;
}

/* Load IRKs command handler: validate the user-supplied list of
 * Identity Resolving Keys, replace the current IRK store with it and
 * enable RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The advertised count must match the actual payload length */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front so the store is replaced all or
	 * nothing.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5158 5159 5160 5161
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5175 5176
}

5177
/* Load Long Term Keys command handler: validate the user-supplied key
 * list and replace the current SMP LTK store with it. Debug keys
 * (MGMT_LTK_P256_DEBUG) are deliberately skipped and never stored.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The advertised count must match the actual payload length */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything up front so the store is replaced all or
	 * nothing.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through - debug keys drop into the default
			 * case below and are never added to the store.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5269
/* Completion callback for a pending Get Connection Information
 * command: build the response from the connection's cached RSSI and
 * TX power values (or invalid markers on failure) and release the
 * connection references taken in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param holds a copy of the request, starting with the
	 * address info.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and reference taken when the command was queued */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

5296 5297
/* HCI request completion handler for the RSSI/TX power refresh issued
 * by get_conn_info(): recover the connection handle from the last sent
 * command and complete the matching pending command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* cmd_complete is conn_info_cmd_complete, set in get_conn_info() */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Get Connection Information command handler: reply with cached
 * RSSI/TX power values if they are fresh enough, otherwise queue HCI
 * Read RSSI / Read Transmit Power Level commands and complete the
 * command asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5470
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *hconn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	int ret;

	/* Echo back the address from the original command parameters */
	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	/* Only fill in clock values when the HCI request succeeded */
	if (!status) {
		struct hci_dev *hdev = hci_dev_get(cmd->index);

		if (hdev) {
			rp.local_clock = cpu_to_le32(hdev->clock);
			hci_dev_put(hdev);
		}

		if (hconn) {
			rp.piconet_clock = cpu_to_le32(hconn->clock);
			rp.accuracy = cpu_to_le16(hconn->clock_accuracy);
		}
	}

	ret = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Release the reference taken when the command was queued */
	if (hconn) {
		hci_conn_drop(hconn);
		hci_conn_put(hconn);
	}

	return ret;
}

5506
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *sent;
	struct mgmt_pending_cmd *pending;
	struct hci_conn *hconn = NULL;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Recover the parameters of the HCI_OP_READ_CLOCK we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!sent)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific link
	 * was requested, so look up the matching connection.
	 */
	if (sent->which)
		hconn = hci_conn_hash_lookup_handle(hdev,
						    __le16_to_cpu(sent->handle));

	pending = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, hconn);
	if (!pending)
		goto unlock;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the mgmt GET_CLOCK_INFO command: read the local clock and,
 * when a peer address is given, the piconet clock of that connection.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock clk_cp;
	struct mgmt_pending_cmd *pending;
	struct hci_request req;
	struct hci_conn *hconn = NULL;
	int ret;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only makes sense for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection, which must
	 * currently be established.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hconn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						&cp->addr.bdaddr);
		if (!hconn || hconn->state != BT_CONNECTED) {
			ret = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	}

	pending = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!pending) {
		ret = -ENOMEM;
		goto unlock;
	}

	pending->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command reads the local clock (which == 0x00) */
	memset(&clk_cp, 0, sizeof(clk_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(clk_cp), &clk_cp);

	/* For a specific connection also read the piconet clock, keeping
	 * a reference on the connection until the request completes.
	 */
	if (hconn) {
		hci_conn_hold(hconn);
		pending->user_data = hci_conn_get(hconn);

		clk_cp.handle = cpu_to_le16(hconn->handle);
		clk_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(clk_cp), &clk_cp);
	}

	ret = hci_req_run(&req, get_clock_info_complete);
	if (ret < 0)
		mgmt_pending_remove(pending);

unlock:
	hci_dev_unlock(hdev);
	return ret;
}

5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672
/* Return true when an established LE connection to the given address
 * (with matching address type) exists on this controller.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	return conn && conn->dst_type == type && conn->state == BT_CONNECTED;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	/* Creates the entry (with defaults) if it does not exist yet */
	p = hci_conn_params_add(hdev, addr, addr_type);
	if (!p)
		return -EIO;

	/* Nothing to do when the requested policy is already in effect */
	if (p->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending-action list the entry is on */
	list_del_init(&p->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&p->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if none is established */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&p->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	p->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684
/* Emit the MGMT_EV_DEVICE_ADDED event to all sockets except @sk */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5685
/* Completion callback for the HCI request issued by add_device() */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);
}

5704 5705 5706 5707
/* Handle the mgmt ADD_DEVICE command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections)
 * is accepted and the device is added to the controller whitelist.
 * For LE addresses the action selects the auto-connect policy:
 *   0x00 -> scan and report (HCI_AUTO_CONN_REPORT)
 *   0x01 -> direct connection (HCI_AUTO_CONN_DIRECT)
 *   0x02 -> always auto-connect (HCI_AUTO_CONN_ALWAYS)
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Reject invalid address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changed, so page scan may need updating */
		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807
/* Emit the MGMT_EV_DEVICE_REMOVED event to all sockets except @sk */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

5808
/* Completion callback for the HCI request issued by remove_device() */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);
}

5827 5828 5829 5830
/* Handle the mgmt REMOVE_DEVICE command.
 *
 * A specific address removes that device from the whitelist (BR/EDR)
 * or deletes its LE connection parameters.  BDADDR_ANY with address
 * type 0 wipes the whole whitelist and all non-disabled LE connection
 * parameter entries.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			/* Whitelist changed, so page scan may need updating */
			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries are internal only and not removable
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Only address type 0 is valid together with BDADDR_ANY */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5954 5955 5956 5957
/* Handle the mgmt LOAD_CONN_PARAM command: replace the stored LE
 * connection parameters with the supplied list, skipping (but not
 * failing on) individual invalid entries.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 count, exp_len;
	int idx;

	/* Connection parameters are an LE feature */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	count = __le16_to_cpu(cp->param_count);
	if (count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared entry count */
	exp_len = sizeof(*cp) + count * sizeof(struct mgmt_conn_param);
	if (exp_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       exp_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (idx = 0; idx < count; idx++) {
		struct mgmt_conn_param *param = &cp->params[idx];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		switch (param->addr.type) {
		case BDADDR_LE_PUBLIC:
			addr_type = ADDR_LE_DEV_PUBLIC;
			break;
		case BDADDR_LE_RANDOM:
			addr_type = ADDR_LE_DEV_RANDOM;
			break;
		default:
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

6040 6041 6042 6043 6044 6045 6046 6047 6048 6049
/* Handle the mgmt SET_EXTERNAL_CONFIG command: toggle whether the
 * controller configuration comes from an external source.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool toggled;
	int ret;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* toggled reflects whether the flag actually changed state */
	toggled = cp->config ?
		!hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED) :
		hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	ret = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (ret < 0 || !toggled)
		goto unlock;

	ret = new_options(hdev, sk);

	/* If the configured state changed, switch the index between the
	 * configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return ret;
}

6096 6097 6098 6099 6100 6101 6102 6103 6104 6105
/* Handle the mgmt SET_PUBLIC_ADDRESS command: program the controller's
 * public Bluetooth address via the driver-provided set_bdaddr hook.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool addr_changed;
	int ret;

	BT_DBG("%s", hdev->name);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must support programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	addr_changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	ret = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (ret < 0 || !addr_changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		ret = new_options(hdev, sk);

	/* With a valid address the controller may now be fully
	 * configured, so re-register it as a configured index.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return ret;
}

6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158
/* Append one EIR structure (length, type, data) at offset eir_len and
 * return the new total length of the EIR buffer.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *p = eir + eir_len;

	*p++ = sizeof(type) + data_len; /* length covers type + payload */
	*p++ = type;
	memcpy(p, data, data_len);

	return eir_len + 2 + data_len;
}

6159 6160 6161 6162 6163 6164 6165
/* Handle the mgmt READ_LOCAL_OOB_EXT_DATA command.
 *
 * Builds EIR-formatted out-of-band pairing data for either BR/EDR
 * (class of device) or LE (address, role, optional SC confirm/random
 * values and flags), returns it to the caller and broadcasts it via
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to interested sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_NOT_POWERED,
					 &cp->type, sizeof(cp->type));

	/* First pass: validate the requested transport and compute the
	 * worst-case EIR length for the reply allocation.
	 */
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		status = mgmt_bredr_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		eir_len = 5;
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		status = mgmt_le_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		/* address + role + SC confirm + SC random + flags */
		eir_len = 9 + 3 + 18 + 18 + 3;
		break;
	default:
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->type, sizeof(cp->type));
	}

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	/* Second pass: fill in the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		/* Secure Connections OOB values must be freshly generated */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			goto done;
		}

		/* Pick the address the peer should use: RPA when privacy
		 * is enabled, the static address when forced or when no
		 * public address is usable, the public address otherwise.
		 * The 7th byte encodes the address type (0x01 = random).
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			memcpy(addr, &hdev->rpa, 6);
			addr[6] = 0x01;
		} else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
			   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
			   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
			    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
	if (err < 0)
		goto done;

	/* Notify other sockets that opted in to OOB data updates */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314
/* Handle the mgmt READ_ADV_FEATURES command: report advertising
 * capabilities (currently no instances and no flags are supported).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len = sizeof(*rp);
	int ret;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	rp->supported_flags = cpu_to_le32(0);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = 0;
	rp->num_instances = 0;

	hci_dev_unlock(hdev);

	ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return ret;
}

6330
/* Dispatch table for mgmt commands, indexed by opcode.  Each entry
 * pairs the handler with its expected (minimum, for HCI_MGMT_VAR_LEN)
 * parameter size and flags describing whether it needs a controller
 * index (HCI_MGMT_NO_HDEV), works on unconfigured controllers
 * (HCI_MGMT_UNCONFIGURED) or is allowed for untrusted sockets
 * (HCI_MGMT_UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
};

6416
void mgmt_index_added(struct hci_dev *hdev)
6417
{
6418
	struct mgmt_ev_ext_index ev;
6419

6420 6421 6422
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6423 6424 6425 6426 6427
	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6428
			ev.type = 0x01;
6429 6430 6431
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6432
			ev.type = 0x00;
6433 6434
		}
		break;
6435 6436 6437 6438 6439
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6440
	}
6441 6442 6443 6444 6445

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6446 6447
}

6448
void mgmt_index_removed(struct hci_dev *hdev)
6449
{
6450
	struct mgmt_ev_ext_index ev;
6451
	u8 status = MGMT_STATUS_INVALID_INDEX;
6452

6453 6454 6455
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6456 6457 6458
	switch (hdev->dev_type) {
	case HCI_BREDR:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6459

6460 6461 6462
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6463
			ev.type = 0x01;
6464 6465 6466
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6467
			ev.type = 0x00;
6468 6469
		}
		break;
6470 6471 6472 6473 6474
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6475
	}
6476 6477 6478 6479 6480

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6481 6482
}

6483
/* This function requires the caller holds hdev->lock */
6484
static void restart_le_actions(struct hci_request *req)
6485
{
6486
	struct hci_dev *hdev = req->hdev;
6487 6488 6489
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
6490 6491 6492 6493 6494 6495
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
6496
		case HCI_AUTO_CONN_DIRECT:
6497 6498 6499 6500 6501 6502 6503 6504
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
6505
		}
6506
	}
6507

6508
	__hci_update_background_scan(req);
6509 6510
}

6511
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): responds to pending Set Powered commands
 * and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);
}

6538
/* Build and submit the HCI request that brings the controller in sync
 * with the mgmt settings after powering on: SSP/Secure Connections,
 * LE host support, advertising data, link security, page scan, class,
 * name and EIR. Returns the hci_req_run() result; 0 means commands
 * were queued and powered_complete() will run later.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the controller's authentication state with the mgmt
	 * link security setting.
	 */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6609

6610 6611 6612
/* Handle a controller power state change. Finishes pending Set
 * Powered commands, fails all other pending commands on power-off,
 * and broadcasts New Settings. Returns the new_settings() result
 * (or 0 when mgmt is not in use or commands were queued).
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* If HCI commands were queued, powered_complete() takes
		 * care of the responses and events asynchronously.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a zeroed class of device when one was set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6656

6657
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6658
{
6659
	struct mgmt_pending_cmd *cmd;
6660 6661
	u8 status;

6662
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6663
	if (!cmd)
6664
		return;
6665 6666 6667 6668 6669 6670

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

6671
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6672 6673 6674 6675

	mgmt_pending_remove(cmd);
}

6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686
/* Discoverable timeout handler: clear the discoverable flags, restore
 * plain page scan and refresh class/advertising data accordingly.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Drop inquiry scan, keep page scan only */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

6707 6708
/* Emit a New Link Key event so user space can store the BR/EDR key. */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key event;

	memset(&event, 0, sizeof(event));

	event.store_hint = persistent;

	/* Link keys are a BR/EDR-only concept */
	event.key.addr.type = BDADDR_BREDR;
	bacpy(&event.key.addr.bdaddr, &key->bdaddr);

	event.key.type = key->type;
	event.key.pin_len = key->pin_len;
	memcpy(event.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &event, sizeof(event), NULL);
}
6723

6724 6725
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
6739 6740 6741 6742

	return MGMT_LTK_UNAUTHENTICATED;
}

6743
/* Emit a New Long Term Key event so user space can decide whether to
 * store the key for future reconnections.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the legacy (SMP_LTK) master variant sets the flag */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

6781 6782 6783 6784 6785 6786
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802
	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resovlable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of they system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

6803 6804 6805 6806 6807 6808 6809 6810
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

6811 6812
/* Emit a New Signature Resolving Key event towards user space. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

6841
/* Forward a suggested LE connection parameter update to user space.
 * Only identity addresses are worth storing, anything else is
 * silently dropped.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param param_ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&param_ev, 0, sizeof(param_ev));

	param_ev.store_hint = store_hint;
	param_ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	bacpy(&param_ev.addr.bdaddr, bdaddr);

	/* Parameters go out over the wire in little endian */
	param_ev.min_interval = cpu_to_le16(min_interval);
	param_ev.max_interval = cpu_to_le16(max_interval);
	param_ev.latency = cpu_to_le16(latency);
	param_ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &param_ev, sizeof(param_ev),
		   NULL);
}

6862 6863
/* Emit a Device Connected event including EIR data: the LE
 * advertising data when present, otherwise remote name and class of
 * device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Append class of device only when one is set */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

6899
/* Pending-command iterator: complete a Disconnect command with
 * success and hand its socket back through @data so the caller can
 * skip it when broadcasting the Device Disconnected event.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Grab a reference before mgmt_pending_remove() drops the
	 * command's own hold on the socket.
	 */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

6911
/* Pending-command iterator: finish an Unpair Device command and emit
 * the Device Unpaired event to everyone except its originator.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

6922 6923
bool mgmt_powering_down(struct hci_dev *hdev)
{
6924
	struct mgmt_pending_cmd *cmd;
6925 6926
	struct mgmt_mode *cp;

6927
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6928 6929 6930 6931 6932 6933 6934 6935 6936 6937
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

6938
/* Emit a Device Disconnected event and complete any related pending
 * Disconnect and Unpair Device commands. Also resumes a deferred
 * power-off once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously announced via Device Connected
	 * get a disconnected event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete a pending Disconnect command; its socket (held in
	 * sk) is skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

6974 6975
/* Handle a failed disconnect attempt: finish pending Unpair Device
 * commands and, if the failure matches an outstanding Disconnect
 * command, complete it with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond when the pending command targets this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
6999

7000 7001
/* Emit a Connect Failed event; also resume a pending power-off when
 * this was the last open connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7019

7020
/* Ask user space for a PIN code for a legacy pairing attempt. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	/* PIN code pairing only exists on BR/EDR */
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

7031 7032
/* Complete a pending PIN Code Reply command with the HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *pending;

	pending = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!pending)
		return;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}

7044 7045
/* Complete a pending PIN Code Negative Reply command. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *pending;

	pending = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!pending)
		return;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
7056

7057
/* Forward a user confirmation (numeric comparison) request to user
 * space. Returns the result of sending the event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7074
/* Ask user space to provide a passkey for pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7088
/* Generic completion helper for the user pairing response commands.
 * Returns -ENOENT when no matching command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *pending = pending_find(opcode, hdev);

	if (!pending)
		return -ENOENT;

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);

	return 0;
}

7104
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

7111
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7118

7119
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

7126
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149
/* Notify user space of the passkey displayed for the remote device
 * and how many digits have been entered so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

7150
/* Emit an Auth Failed event and complete any pairing command that
 * was waiting on this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the pairing command's own socket when broadcasting */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7170

7171
/* Handle completion of the Write Auth Enable HCI command: sync the
 * HCI_LINK_SECURITY flag, respond to pending Set Link Security
 * commands and broadcast New Settings when the state changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * remember whether this actually changed anything.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

7198
static void clear_eir(struct hci_request *req)
7199
{
7200
	struct hci_dev *hdev = req->hdev;
7201 7202
	struct hci_cp_write_eir cp;

7203
	if (!lmp_ext_inq_capable(hdev))
7204
		return;
7205

7206 7207
	memset(hdev->eir, 0, sizeof(hdev->eir));

7208 7209
	memset(&cp, 0, sizeof(cp));

7210
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7211 7212
}

7213
/* Handle completion of the Write SSP Mode HCI command: update the
 * SSP/HS flags, answer pending Set SSP commands, broadcast New
 * Settings on change and refresh (or clear) the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the flags when enabling failed */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also takes high speed support down */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

7266
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7267 7268 7269 7270 7271 7272 7273 7274 7275
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

7276 7277
/* Handle completion of a class-of-device update: collect the socket
 * of any pending class-related command and broadcast the new class on
 * success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}

7293
/* Handle completion of a local name update and emit the Local Name
 * Changed event unless the write was part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command, so the change originated
		 * elsewhere; update the cached name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
7319

7320
/* Deliver locally generated OOB data (P-192 values and, when BR/EDR
 * Secure Connections is enabled, additional P-256 values) to the
 * pending Read Local OOB Data command.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			        mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Without P-256 data, shrink the response */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				  &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7356

7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368
/* Return true if @uuid occurs in the @uuids array of 128-bit values. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++) {
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;
	}

	return false;
}

7369 7370
/* Scan EIR/advertising data for 16-, 32- and 128-bit service UUID
 * fields and return true if any entry matches @uuids (given as
 * 128-bit values based on the Bluetooth base UUID).
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				/* Expand to 128 bit via the base UUID */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

7424 7425 7426
/* Schedule a LE scan restart so that controllers with strict
 * duplicate filtering report fresh RSSI values, unless the scan is
 * about to end anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the remaining scan time is shorter
	 * than the restart delay.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

7439 7440
/* Apply the service discovery filter (RSSI threshold and UUID list)
 * to a found device; returns false when the result must be dropped.
 * May schedule a LE scan restart as a side effect on controllers
 * with strict duplicate filtering.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

/* Report a discovered device to user space as a Device Found event,
 * applying the discovery filter first and merging EIR, class of
 * device and scan response data into a single EIR blob.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7552

7553 7554
/* Report a resolved remote name as a Device Found event carrying a
 * complete-name EIR field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	char buf[sizeof(struct mgmt_ev_device_found) + HCI_MAX_NAME_LENGTH + 2];
	struct mgmt_ev_device_found *ev = (void *) buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
7575

7576
/* Broadcast a change of the discovery state to mgmt listeners. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.discovering = discovering;
	ev.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
7588

7589
/* Completion callback for re-enabled advertising; only logs status. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

/* Re-enable advertising if the HCI_ADVERTISING setting is still on. */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
7605 7606 7607 7608 7609

/* Management channel description for the HCI control socket,
 * dispatching mgmt commands through the handler table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

/* Register the management control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the management control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}