mgmt.c 215.0 KB
Newer Older
1 2
/*
   BlueZ - Bluetooth protocol stack for Linux
3

4
   Copyright (C) 2010  Nokia Corporation
5
   Copyright (C) 2011-2012 Intel Corporation
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

27
#include <linux/module.h>
28 29 30 31
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
32
#include <net/bluetooth/hci_sock.h>
33
#include <net/bluetooth/l2cap.h>
34
#include <net/bluetooth/mgmt.h>
35

36
#include "hci_request.h"
37
#include "smp.h"
38
#include "mgmt_util.h"
39
#include "mgmt_config.h"
40
#include "msft.h"
41

42
#define MGMT_VERSION	1
43
#define MGMT_REVISION	17
44

45 46 47 48 49 50 51
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
52
	MGMT_OP_SET_BONDABLE,
53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
83
	MGMT_OP_SET_DEVICE_ID,
84
	MGMT_OP_SET_ADVERTISING,
85
	MGMT_OP_SET_BREDR,
86
	MGMT_OP_SET_STATIC_ADDRESS,
87
	MGMT_OP_SET_SCAN_PARAMS,
88
	MGMT_OP_SET_SECURE_CONN,
89
	MGMT_OP_SET_DEBUG_KEYS,
90
	MGMT_OP_SET_PRIVACY,
91
	MGMT_OP_LOAD_IRKS,
92
	MGMT_OP_GET_CONN_INFO,
93
	MGMT_OP_GET_CLOCK_INFO,
94 95
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
96
	MGMT_OP_LOAD_CONN_PARAM,
97
	MGMT_OP_READ_UNCONF_INDEX_LIST,
98
	MGMT_OP_READ_CONFIG_INFO,
99
	MGMT_OP_SET_EXTERNAL_CONFIG,
100
	MGMT_OP_SET_PUBLIC_ADDRESS,
101
	MGMT_OP_START_SERVICE_DISCOVERY,
102
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103
	MGMT_OP_READ_EXT_INDEX_LIST,
104
	MGMT_OP_READ_ADV_FEATURES,
105
	MGMT_OP_ADD_ADVERTISING,
106
	MGMT_OP_REMOVE_ADVERTISING,
107
	MGMT_OP_GET_ADV_SIZE_INFO,
108
	MGMT_OP_START_LIMITED_DISCOVERY,
109
	MGMT_OP_READ_EXT_INFO,
110
	MGMT_OP_SET_APPEARANCE,
111
	MGMT_OP_SET_BLOCKED_KEYS,
112
	MGMT_OP_SET_WIDEBAND_SPEECH,
113
	MGMT_OP_READ_SECURITY_INFO,
114 115
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
116 117
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
118 119
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
120 121
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
122
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
123
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
124
	MGMT_OP_REMOVE_ADV_MONITOR,
125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
148
	MGMT_EV_PASSKEY_NOTIFY,
149
	MGMT_EV_NEW_IRK,
150
	MGMT_EV_NEW_CSRK,
151 152
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
153
	MGMT_EV_NEW_CONN_PARAM,
154
	MGMT_EV_UNCONF_INDEX_ADDED,
155
	MGMT_EV_UNCONF_INDEX_REMOVED,
156
	MGMT_EV_NEW_CONFIG_OPTIONS,
157 158
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
159
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
160 161
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
162
	MGMT_EV_EXT_INFO_CHANGED,
163
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
164
	MGMT_EV_EXP_FEATURE_CHANGED,
165
	MGMT_EV_DEVICE_FLAGS_CHANGED,
166 167
};

168 169 170 171 172 173
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
174
	MGMT_OP_READ_EXT_INFO,
175
	MGMT_OP_READ_SECURITY_INFO,
176
	MGMT_OP_READ_EXP_FEATURES_INFO,
177
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
178
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
179 180 181 182 183 184 185 186 187 188 189 190 191
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
192
	MGMT_EV_EXT_INFO_CHANGED,
193
	MGMT_EV_EXP_FEATURE_CHANGED,
194 195
};

196
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
197

198 199 200
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

201
/* HCI to MGMT error code conversion table */
202
static const u8 mgmt_status_table[] = {
203 204 205 206 207 208
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
209
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Translate an HCI status code into its MGMT equivalent; anything
 * outside the conversion table collapses to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	return hci_status < ARRAY_SIZE(mgmt_status_table) ?
	       mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}

274 275
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no originator socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

281 282 283 284 285 286 287
/* Control-channel broadcast limited to sockets that match @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, skip_sk);
}

288 289 290 291
/* Broadcast an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

295 296 297 298 299 300 301 302
/* Map a MGMT LE address type onto the HCI LE address type used by the
 * core (public vs random).
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}

303 304 305 306 307 308 309 310
/* Fill a struct mgmt_rp_read_version handed in as an opaque buffer
 * with the compiled-in MGMT interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rsp = ver;

	rsp->revision = cpu_to_le16(MGMT_REVISION);
	rsp->version = MGMT_VERSION;
}

311 312
/* MGMT_OP_READ_VERSION handler: report the MGMT interface version and
 * revision back to the requesting socket.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

324 325
/* MGMT_OP_READ_COMMANDS handler: report the supported command and event
 * opcodes. Untrusted sockets only see the read-only subset.
 *
 * The trusted/untrusted branches previously duplicated the two copy
 * loops; they now share them via table pointers.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands are followed immediately by events in the same
	 * variable-length opcode array.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

376 377
/* MGMT_OP_READ_INDEX_LIST handler: report the indexes of all configured
 * primary controllers. Controllers still in setup/config, bound to a
 * user channel, raw-only, or unconfigured are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used only for
	 * sizing the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that are actually visible. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

436 437 438 439 440 441 442 443 444
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: report the indexes of primary
 * controllers that are still unconfigured (mirror image of
 * read_index_list()).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the visible unconfigured indexes. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

496 497 498 499 500 501 502 503
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers (primary
 * and AMP) with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information. Calling this
 * switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the visible entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

570 571 572
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
573
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
574 575
		return false;

576 577
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
578 579 580 581 582 583
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

584 585 586 587
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

588
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
590 591
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

592 593
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 595 596 597 598 599
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

600 601 602 603
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

604 605
	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
606 607
}

608 609 610 611
/* Complete @opcode with the current missing-options bitmask as the
 * response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

616 617 618 619
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764
/* Build the MGMT_PHY_* bitmask of PHYs the controller hardware supports,
 * derived from the BR/EDR LMP feature bits and the LE feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M rates are only meaningful when 2M EDR exists */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR
 * this is derived from hdev->pkt_type; note that the EDR HCI_2DHx/3DHx
 * bits are "packet type not allowed" bits, so a cleared bit means the
 * rate is selected. For LE it mirrors the default TX/RX PHY settings.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are inverted: set means "do not use" */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

/* Everything except the mandatory always-on PHYs (BR 1M 1-slot and
 * LE 1M TX/RX) is user-configurable.
 */
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return get_supported_phys(hdev) &
	       ~(MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX);
}

765 766 767 768 769
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
770
	settings |= MGMT_SETTING_BONDABLE;
771
	settings |= MGMT_SETTING_DEBUG_KEYS;
772 773
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;
774

775
	if (lmp_bredr_capable(hdev)) {
776 777
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
778 779
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
780 781 782 783 784

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
785

786
		if (lmp_sc_capable(hdev))
787
			settings |= MGMT_SETTING_SECURE_CONN;
788

789
		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
790
			     &hdev->quirks))
791
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
792
	}
793

794
	if (lmp_le_capable(hdev)) {
795
		settings |= MGMT_SETTING_LE;
796
		settings |= MGMT_SETTING_ADVERTISING;
797
		settings |= MGMT_SETTING_SECURE_CONN;
798
		settings |= MGMT_SETTING_PRIVACY;
799
		settings |= MGMT_SETTING_STATIC_ADDRESS;
800
	}
801

802 803
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
804 805
		settings |= MGMT_SETTING_CONFIGURATION;

806 807
	settings |= MGMT_SETTING_PHY_CONFIGURATION;

808 809 810 811 812 813 814
	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

815
	if (hdev_is_powered(hdev))
816 817
		settings |= MGMT_SETTING_POWERED;

818
	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
819 820
		settings |= MGMT_SETTING_CONNECTABLE;

821
	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
822 823
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

824
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
825 826
		settings |= MGMT_SETTING_DISCOVERABLE;

827
	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
828
		settings |= MGMT_SETTING_BONDABLE;
829

830
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
831 832
		settings |= MGMT_SETTING_BREDR;

833
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
834 835
		settings |= MGMT_SETTING_LE;

836
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
837 838
		settings |= MGMT_SETTING_LINK_SECURITY;

839
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
840 841
		settings |= MGMT_SETTING_SSP;

842
	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
843 844
		settings |= MGMT_SETTING_HS;

845
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
846 847
		settings |= MGMT_SETTING_ADVERTISING;

848
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
849 850
		settings |= MGMT_SETTING_SECURE_CONN;

851
	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
852 853
		settings |= MGMT_SETTING_DEBUG_KEYS;

854
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
855 856
		settings |= MGMT_SETTING_PRIVACY;

857 858 859 860 861
	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
862
	 * will never be set. If the address is configured, then if the
863 864 865 866 867 868
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
869
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
870
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
871 872 873 874 875
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

876 877 878
	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

879 880 881
	return settings;
}

882 883 884 885 886 887 888 889 890 891 892 893
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

/* As pending_find(), but additionally match on the command's user data. */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

894
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
895
{
896
	struct mgmt_pending_cmd *cmd;
897 898 899 900

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
901
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
902 903 904 905 906 907 908
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
909
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
910
			return LE_AD_LIMITED;
911
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
912 913 914 915 916 917
			return LE_AD_GENERAL;
	}

	return 0;
}

918
bool mgmt_get_connectable(struct hci_dev *hdev)
919 920
{
	struct mgmt_pending_cmd *cmd;
921

922 923 924 925 926 927
	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
928

929
		return cp->val;
930 931
	}

932 933
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
934

935 936 937
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
938
					    service_cache.work);
939
	struct hci_request req;
940

941
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
942 943
		return;

944 945
	hci_req_init(&req, hdev);

946 947
	hci_dev_lock(hdev);

948
	__hci_req_update_eir(&req);
949
	__hci_req_update_class(&req);
950 951

	hci_dev_unlock(hdev);
952 953

	hci_req_run(&req, NULL);
954 955
}

956 957 958 959 960 961
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

962
	bt_dev_dbg(hdev, "");
963

964
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
965

966
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
967 968 969
		return;

	/* The generation of a new RPA and programming it into the
970 971
	 * controller happens in the hci_req_enable_advertising()
	 * function.
972 973
	 */
	hci_req_init(&req, hdev);
974 975 976 977
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
978 979 980
	hci_req_run(&req, NULL);
}

981
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
982
{
983
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
984 985
		return;

986
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
987
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
988

989 990 991 992 993
	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
994
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
995 996
}

997
/* MGMT_OP_READ_INFO handler: report the controller's address, version,
 * manufacturer, class, names and supported/current settings.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

1027 1028 1029 1030 1031 1032 1033 1034 1035
/* Append the controller's EIR-formatted data (class of device when
 * BR/EDR is enabled, appearance when LE is enabled, then complete and
 * short names) to @eir. Returns the total number of bytes written.
 * NOTE(review): no explicit bound is enforced here; callers provide a
 * buffer large enough for all fields (e.g. the 512-byte reply buffers).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

1051 1052 1053
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * the class/name data is packed as EIR at the end of the reply. Also
 * switches this socket over to the extended info event and mutes the
 * legacy class-of-device and local-name events for it.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

/* Emit an Extended Controller Information Changed event with the
 * current EIR data to all sockets that opted into the extended event
 * (HCI_MGMT_EXT_INFO_EVENTS), excluding @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

1107
/* Complete @opcode on @sk with the controller's current settings
 * bitfield as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

1115
/* Request-completion callback for clean_up_hci_state(): once all
 * connections are gone, replace the delayed power-off with an
 * immediate one on the request workqueue.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

1125
/* Notify mgmt sockets (except @sk) that advertising instance
 * @instance was added on @hdev.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

1134 1135
/* Notify mgmt sockets (except @sk) that advertising instance
 * @instance was removed from @hdev.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

1144 1145 1146 1147 1148 1149 1150 1151
/* Stop a pending advertising-instance expiry timer, if one is armed,
 * and clear the recorded timeout.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (!hdev->adv_instance_timeout)
		return;

	hdev->adv_instance_timeout = 0;
	cancel_delayed_work(&hdev->adv_instance_expire);
}

1152 1153 1154 1155
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, remove advertising instances,
 * disable advertising, stop discovery and abort every connection.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued);
 * the completion handler is clean_up_hci_complete().
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}

1186
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * Power-on is just queued; power-off first cleans up HCI state and
 * arms a delayed power-off as a safety net, or powers off immediately
 * when no HCI commands were needed.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1241 1242
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
1243
	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1244

1245 1246
	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1247 1248
}

1249 1250 1251 1252 1253
/* Public wrapper: broadcast New Settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1254 1255 1256 1257 1258 1259
/* Iteration context for mgmt_pending_foreach() callbacks such as
 * settings_rsp(): remembers the first responded socket (with a held
 * reference) so a follow-up event can skip it.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket, NULL initially */
	struct hci_dev *hdev;	/* controller being operated on */
	u8 mgmt_status;		/* status value; not used by every caller */
};

1260
/* Per-pending-command callback: answer @cmd with the current settings,
 * unlink and free it, and capture the first socket (with a held ref)
 * in the cmd_lookup so the caller can skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* cmd was already unlinked above, so free directly */
	mgmt_pending_free(cmd);
}

1276
/* Per-pending-command callback: fail @cmd with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

1284
/* Per-pending-command callback: use the command's own cmd_complete
 * handler when it has one, otherwise fall back to a plain status
 * response via cmd_status_rsp().
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1298
/* cmd_complete handler that echoes the full original command
 * parameters back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

1304
/* cmd_complete handler that returns only the leading mgmt_addr_info
 * portion of the original parameters (address + type).
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

1310 1311 1312 1313
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1314
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1315 1316 1317 1318 1319 1320 1321 1322 1323
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1324
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1325 1326 1327 1328 1329
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1330
/* Completion handler for the Set Discoverable HCI work: on failure,
 * report the error and roll back limited-discoverable; on success,
 * arm the discoverable timeout (if any), reply with the new settings
 * and broadcast New Settings to the other sockets.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Timeout was stashed by set_discoverable(); arm it now that the
	 * mode change actually took effect.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1365
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, requires a
 * timeout). When powered, flag updates are made here and the actual
 * HCI work is queued to discoverable_update; the reply then comes
 * from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense when connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

1495
/* Completion handler for the Set Connectable HCI work: report failure
 * or, on success, reply with the new settings and broadcast New
 * Settings to the other sockets.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1523 1524 1525 1526 1527 1528
/* Flag-only connectable update used when the controller is powered
 * off (no HCI traffic possible). Disabling connectable also clears
 * discoverable. Sends the settings reply and, when anything changed,
 * refreshes scan state and broadcasts New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

1552
/* Handler for MGMT_OP_SET_CONNECTABLE. When powered off only the
 * flags are updated; otherwise the flags are adjusted here (disabling
 * connectable also tears down discoverable state) and the HCI work is
 * queued to connectable_update, with the reply delivered from
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable implies dropping discoverable, so
		 * stop the discoverable timeout and clear both variants.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

1609
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * Purely a host-side setting; no HCI commands are needed, but in
 * limited privacy mode a change may require re-programming the
 * advertising address, so the discoverable update work is queued.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1652 1653
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security (authentication). When powered, this issues
 * HCI_OP_WRITE_AUTH_ENABLE and the reply is deferred to the command
 * completion; when powered off only the flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1721
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * When powered, issues HCI_OP_WRITE_SSP_MODE (and turns off SSP debug
 * mode first when disabling); when powered off only the flags change.
 * Disabling SSP also drops High Speed, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS depends on SSP; make sure it ends up cleared
			 * and that clearing it alone still counts as a
			 * settings change.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1802
/* Handler for MGMT_OP_SET_HS: toggle High Speed support. Requires
 * SSP to be enabled. Purely a host-side flag — no HCI commands — but
 * disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP may change the SSP state under us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1859
/* Request-completion callback for set_le(): answer all pending Set LE
 * commands (error or settings), broadcast New Settings, and — when LE
 * ended up enabled — refresh the default advertising/scan-response
 * data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a ref on the first responded socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}

1906
/* Handler for MGMT_OP_SET_LE: enable/disable Low Energy support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED. LE-only controllers cannot have LE
 * switched off. When powered off, or when the host LE state already
 * matches, only the flags are updated; otherwise the HCI request is
 * run and le_enable_complete() delivers the reply.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop (extended) advertising before disabling LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2013 2014 2015 2016 2017 2018 2019 2020
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* Scan all pending commands for one of the EIR/CoD affecting ops */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; only the low 12 bytes are compared since
 * the top 4 bytes carry the 16/32-bit short form.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the smallest representation (16, 32 or 128 bits) that the
 * 128-bit little-endian @uuid can be reduced to.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	/* Not derived from the base UUID: full 128-bit form required */
	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

2055 2056
/* Shared completion for class-of-device affecting commands (Add/Remove
 * UUID, Set Device Class): complete the pending @mgmt_op with the
 * current device class as payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

2074
/* Request-completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

2081
/* Handler for MGMT_OP_ADD_UUID: record the UUID in hdev->uuids and
 * kick off class-of-device and EIR updates. When the request queues
 * no HCI commands (-ENODATA), complete immediately; otherwise the
 * reply comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other CoD/EIR affecting commands */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work was needed; complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

2139 2140 2141 2142 2143
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2144
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2145 2146
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2147 2148 2149 2150 2151 2152
		return true;
	}

	return false;
}

2153
/* Request completion callback for Remove UUID: forward the HCI status
 * to the pending mgmt command through the shared class-update helper.
 * The opcode parameter is unused; the signature matches hci_req_run().
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2160
/* Handle the Remove UUID mgmt command. An all-zero UUID clears the
 * whole list; otherwise every matching entry is deleted. On success a
 * class/EIR refresh is scheduled on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard: all-zero UUID requests removal of every entry */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the flush will
		 * happen later and the command can complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe iteration: entries are removed while walking the list */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued; complete now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2239
/* Request completion callback for Set Device Class: forward the HCI
 * status to the pending mgmt command through the shared helper.
 * The opcode parameter is unused; the signature matches hci_req_run().
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2246
/* Handle the Set Device Class mgmt command: validate and store the new
 * major/minor class and, if powered, push it to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and top three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they are written to the
	 * controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the sync cancel: the work item
		 * itself takes hci_dev_lock, so waiting while holding it
		 * would deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued; complete now */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2317
/* Handle the Load Link Keys mgmt command: replace the adapter's stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * keep-debug-keys policy flag.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so that struct_size() below cannot exceed a u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only if the debug-keys policy actually changed */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

2406
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2407
			   u8 addr_type, struct sock *skip_sk)
2408 2409 2410 2411 2412 2413 2414
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2415
			  skip_sk);
2416 2417
}

2418
/* Handle the Unpair Device mgmt command: remove stored pairing data for
 * the address and optionally terminate an active link. The conn
 * variable doubles as the "terminate the link" request at the done:
 * label: when it is NULL the command completes immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was never paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* With no active LE link the connection parameters can be
	 * dropped right away.
	 */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2546
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2547
		      u16 len)
2548
{
2549
	struct mgmt_cp_disconnect *cp = data;
2550
	struct mgmt_rp_disconnect rp;
2551
	struct mgmt_pending_cmd *cmd;
2552 2553 2554
	struct hci_conn *conn;
	int err;

2555
	bt_dev_dbg(hdev, "sock %p", sk);
2556

2557 2558 2559 2560
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2561
	if (!bdaddr_type_is_valid(cp->addr.type))
2562 2563 2564
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2565

2566
	hci_dev_lock(hdev);
2567 2568

	if (!test_bit(HCI_UP, &hdev->flags)) {
2569 2570 2571
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2572 2573 2574
		goto failed;
	}

2575
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2576 2577
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2578 2579 2580
		goto failed;
	}

2581
	if (cp->addr.type == BDADDR_BREDR)
2582 2583
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
2584
	else
2585 2586
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));
2587

2588
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2589 2590 2591
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
2592 2593 2594
		goto failed;
	}

2595
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2596 2597
	if (!cmd) {
		err = -ENOMEM;
2598
		goto failed;
2599
	}
2600

2601 2602
	cmd->cmd_complete = generic_cmd_complete;

2603
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2604
	if (err < 0)
2605
		mgmt_pending_remove(cmd);
2606 2607

failed:
2608
	hci_dev_unlock(hdev);
2609 2610 2611
	return err;
}

2612
/* Map an HCI link type plus address type to the mgmt BDADDR_* address
 * type used on the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}

2631 2632
/* Handle the Get Connections mgmt command: report the addresses of all
 * connections that have been announced to the management interface,
 * with SCO/eSCO links filtered out of the final list.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *conn;
	int err;
	u16 count;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: size the reply for all mgmt-visible connections */
	count = 0;
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			count++;
	}

	rp = kmalloc(struct_size(rp, addr, count), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses, skipping SCO/eSCO links */
	count = 0;
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			continue;
		bacpy(&rp->addr[count].bdaddr, &conn->dst);
		rp->addr[count].type = link_to_bdaddr(conn->type,
						      conn->dst_type);
		if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
			continue;
		count++;
	}

	rp->conn_count = cpu_to_le16(count);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, count));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2685
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2686
				   struct mgmt_cp_pin_code_neg_reply *cp)
2687
{
2688
	struct mgmt_pending_cmd *cmd;
2689 2690
	int err;

2691
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2692
			       sizeof(*cp));
2693 2694 2695
	if (!cmd)
		return -ENOMEM;

2696 2697
	cmd->cmd_complete = addr_cmd_complete;

2698
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2699
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2700 2701 2702 2703 2704 2705
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

2706
/* Handle the PIN Code Reply mgmt command: forward the user-supplied PIN
 * to the controller for a pending BR/EDR PIN request. A PIN that is not
 * 16 bytes long while high security is required is turned into a
 * negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject shorter ones
	 * by sending a negative reply to the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		/* Only report INVALID_PARAMS if the neg reply was sent OK;
		 * otherwise propagate the send error.
		 */
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

2768 2769
/* Handle the Set IO Capability mgmt command: store the IO capability
 * used for future pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

2791
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2792 2793
{
	struct hci_dev *hdev = conn->hdev;
2794
	struct mgmt_pending_cmd *cmd;
2795

2796
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

2809
/* Complete a pending Pair Device command with the given mgmt status,
 * detach the pairing callbacks from the connection and release the
 * references taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken with hci_conn_get() at pairing start */
	hci_conn_put(conn);

	return err;
}

2838 2839 2840
/* Called by the SMP layer when pairing finishes; complete and remove
 * any pending Pair Device command tied to this connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}

2850 2851
/* Connection callback for BR/EDR pairing: complete the pending Pair
 * Device command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}

2866
/* Connection callback for LE pairing. Success is reported through
 * mgmt_smp_complete() instead, so only failures are handled here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}

2885
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2886
		       u16 len)
2887
{
2888
	struct mgmt_cp_pair_device *cp = data;
2889
	struct mgmt_rp_pair_device rp;
2890
	struct mgmt_pending_cmd *cmd;
2891 2892 2893 2894
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

2895
	bt_dev_dbg(hdev, "sock %p", sk);
2896

2897 2898 2899 2900
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2901
	if (!bdaddr_type_is_valid(cp->addr.type))
2902 2903 2904
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2905

2906
	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2907 2908 2909
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2910

2911
	hci_dev_lock(hdev);
2912

2913
	if (!hdev_is_powered(hdev)) {
2914 2915 2916
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2917 2918 2919
		goto unlock;
	}

2920 2921 2922 2923 2924 2925 2926
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

2927
	sec_level = BT_SECURITY_MEDIUM;
2928
	auth_type = HCI_AT_DEDICATED_BONDING;
2929

2930
	if (cp->addr.type == BDADDR_BREDR) {
2931 2932
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
2933
	} else {
2934
		u8 addr_type = le_addr_type(cp->addr.type);
2935
		struct hci_conn_params *p;
2936

2937 2938 2939 2940 2941 2942 2943 2944 2945
		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
2946 2947 2948 2949
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2950

2951 2952
		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
2953
					   HCI_LE_CONN_TIMEOUT);
2954
	}
2955

2956
	if (IS_ERR(conn)) {
2957 2958 2959 2960
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
2961 2962 2963 2964
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
2965 2966 2967
		else
			status = MGMT_STATUS_CONNECT_FAILED;

2968 2969
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
2970 2971 2972 2973
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
2974
		hci_conn_drop(conn);
2975 2976
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2977 2978 2979
		goto unlock;
	}

2980
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2981 2982
	if (!cmd) {
		err = -ENOMEM;
2983
		hci_conn_drop(conn);
2984 2985 2986
		goto unlock;
	}

2987 2988
	cmd->cmd_complete = pairing_complete;

2989
	/* For LE, just connecting isn't a proof that the pairing finished */
2990
	if (cp->addr.type == BDADDR_BREDR) {
2991
		conn->connect_cfm_cb = pairing_complete_cb;
2992 2993 2994 2995 2996 2997 2998
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}
2999

3000
	conn->io_capability = cp->io_cap;
3001
	cmd->user_data = hci_conn_get(conn);
3002

3003
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3004 3005 3006 3007
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}
3008 3009 3010 3011

	err = 0;

unlock:
3012
	hci_dev_unlock(hdev);
3013 3014 3015
	return err;
}

3016 3017
/* Handle the Cancel Pair Device mgmt command: abort the in-progress
 * Pair Device command for the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* The address must match the connection the pairing is for */
	conn = cmd->user_data;
	if (bacmp(&addr->bdaddr, &conn->dst)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

3059
/* Common handler for the user confirmation / passkey (negative) reply
 * mgmt commands. LE replies are routed through SMP; BR/EDR replies are
 * forwarded to the controller as the given HCI command and completed
 * when the corresponding HCI event arrives.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go to the SMP layer, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

3130 3131 3132 3133 3134
/* Handle the PIN Code Negative Reply mgmt command via the shared
 * user-pairing response helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

3142 3143
/* Handle the User Confirm Reply mgmt command via the shared
 * user-pairing response helper after validating the payload size.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

3158
/* Handle the User Confirm Negative Reply mgmt command via the shared
 * user-pairing response helper.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

3170 3171
/* Handle the User Passkey Reply mgmt command via the shared
 * user-pairing response helper, forwarding the supplied passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

3182
/* Handle the User Passkey Negative Reply mgmt command via the shared
 * user-pairing response helper.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222
/* Expire the current advertising instance if it carries any of the
 * given flags: cancel the instance timeout and reschedule advertising
 * starting from the next instance.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Advance to the next instance; nothing left to schedule if none */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}

3223
/* Request completion callback for Set Local Name: report the result to
 * the pending mgmt command and, on success while advertising, expire
 * any advertising instance that embeds the local name.
 * The opcode parameter is unused; the signature matches hci_req_run().
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (!status) {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3255
/* Handle MGMT_OP_SET_LOCAL_NAME: update the controller's device name and
 * short name. When powered, the change is propagated to the controller
 * via HCI (name, EIR and scan response data) and completes asynchronously
 * in set_name_complete(); when powered off only the stored copies are
 * updated and the command completes immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off no HCI traffic is needed: store the name,
	 * notify listeners and complete the command right away.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3325 3326 3327 3328
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value. Only
 * supported on LE-capable controllers. A change while advertising causes
 * the current advertising instance to be expired so the new appearance
 * is picked up.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

3359 3360 3361 3362 3363
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, selected
 * and configurable PHY bitmasks. (The struct tag "confguration" is a
 * historic typo in the mgmt UAPI header and must be kept as-is.)
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

3392 3393 3394 3395 3396
/* Completion handler for the HCI_OP_LE_SET_DEFAULT_PHY request issued by
 * set_phy_configuration(). On success a PHY-configuration-changed event
 * is broadcast to all other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the selected_phys bitmask.
 * BR/EDR selections are applied synchronously by rewriting hdev->pkt_type;
 * LE selections are translated into an HCI_OP_LE_SET_DEFAULT_PHY command
 * that completes asynchronously in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always stay selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map BR/EDR selections to ACL packet types. The EDR (2DHx/3DHx)
	 * HCI packet-type bits are exclusion bits, hence the inverted
	 * logic compared to the basic rate DH/DM bits.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, the BR/EDR update
	 * above is all that is needed and the command can complete now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller we have no TX/RX preference. */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

3578 3579 3580 3581 3582 3583 3584 3585 3586 3587
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the device's blocked-key list
 * with the keys supplied by userspace. On allocation failure the list may
 * be left partially populated and MGMT_STATUS_NO_RESOURCES is returned.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Loop over the CPU-order key_count: keys->key_count is __le16,
	 * so reading it raw gives the wrong count on big-endian systems.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}

3627 3628 3629 3630 3631 3632 3633
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech setting.
 * Only possible when the controller quirk advertises support, and the
 * value can only change while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value while powered is rejected; only a request
	 * matching the current state is allowed through below.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731
/* Handle MGMT_OP_READ_SECURITY_INFO: build an EIR-encoded list of
 * security capabilities (key validation and key size enforcement flags,
 * plus encryption key size limits where available). The 16-byte buffer
 * is sized for the fixed set of entries appended below.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}

3732 3733 3734 3735 3736 3737 3738 3739
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c
 *
 * UUID of the "debug" experimental feature; the bytes below are the
 * reversed (little-endian) representation used on the mgmt wire.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * available on the given index (or on the non-controller index when hdev
 * is NULL). Each feature entry is 20 bytes (16-byte UUID + 4-byte flags);
 * buf[42] leaves room for the header plus two entries.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[42];
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is only exposed on the non-controller index. */
	if (!hdev) {
		u32 flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}

3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for the debug feature to all
 * sockets that enabled experimental feature events, except @skip.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif

3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799
/* Handle MGMT_OP_SET_EXP_FEATURE: enable/disable an experimental feature
 * identified by UUID. The all-zero UUID disables every feature; unknown
 * UUIDs are rejected with MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* A zero UUID means "disable all experimental features". */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)

/* Handle MGMT_OP_GET_DEVICE_FLAGS: report the supported and current
 * per-device flags for a BR/EDR whitelist entry or an LE connection
 * parameter entry.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* Zero the reply up front: on the error paths below (goto done)
	 * the address fields are never filled in, and the struct would
	 * otherwise leak uninitialized stack memory to userspace.
	 */
	memset(&rp, 0, sizeof(rp));

	/* bt_dev_dbg() appends a newline itself, so the format string
	 * must not carry its own '\n'.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}

/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED for the given device to all
 * mgmt sockets except the originating one (@sk).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

/* Handle MGMT_OP_SET_DEVICE_FLAGS: update the per-device flags of a
 * BR/EDR whitelist entry or an LE connection parameter entry, emitting
 * a device-flags-changed event on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already in CPU byte order here; running it
	 * through __le32_to_cpu() again would print garbage on
	 * big-endian systems.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			params->current_flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024
/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * advertisement monitor features and the handles currently registered.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	/* mgmt_cmd_complete() copies the reply into an skb, so the
	 * temporary buffer must be freed here or it leaks on every call.
	 */
	kfree(rp);

	return err;
}

4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the supplied patterns,
 * build an adv_monitor and register it with the hci core. On success
 * ownership of @m transfers to hdev (presumably inside
 * hci_add_adv_monitor() — verify); on any failure path the monitor and
 * its patterns are freed here via hci_free_adv_monitor(), which is
 * assumed to accept a NULL argument for the early-exit case.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	__u8 cp_ofst = 0, cp_len = 0;
	unsigned int mp_cnt = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		/* Cap the number of patterns a single monitor may carry. */
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		/* Both the offset and the matched span must stay within
		 * the maximum advertising data length.
		 */
		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	hci_free_adv_monitor(m);
	return err;
}

4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: unregister the monitor with the
 * given handle.
 *
 * NOTE(review): only -ENOENT is turned into a mgmt error here; any other
 * negative return from hci_remove_adv_monitor() would fall through to
 * the success reply — presumably it can only return 0 or -ENOENT; verify.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);

	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	hci_dev_unlock(hdev);

	/* Echo the handle back in wire byte order as received. */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4155 4156 4157 4158 4159 4160 4161
/* Completion handler for the OOB data request issued by
 * read_local_oob_data(). Depending on which HCI command was used, the
 * reply carries either only the 192-bit hash/randomizer (legacy SSP) or
 * both the 192-bit and 256-bit values (Secure Connections); in the
 * legacy case the reply is shrunk so the 256-bit fields are omitted.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated HCI event. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No 256-bit values available; trim them off the reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}

4214
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request the local out-of-band
 * pairing data from the controller. Requires the controller to be
 * powered and SSP-capable; completes asynchronously in
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* With Secure Connections the extended command returns both the
	 * 192-bit and 256-bit OOB values; otherwise use the legacy one.
	 */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * received for a remote device. Two command sizes are accepted: the
 * legacy form carrying only P-192 hash/randomizer (BR/EDR only), and the
 * extended form carrying both P-192 and P-256 values. Zeroed key pairs
 * are treated as "no data" for that curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 data only, valid for BR/EDR addresses */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 data */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Any other length is a malformed command */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Wildcard address clears the whole OOB store */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

/* Completion hook for any of the three "start discovery" mgmt commands.
 * Resolves whichever variant is pending, sends its response, and wakes
 * the suspend machinery if it was waiting for discovery to unpause.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Only one of the three start-discovery variants can be pending */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}

4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		/* Intentional fall-through */
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY. Validates state and parameters,
 * records the discovery settings on hdev and kicks the discovery work
 * item; the mgmt response is sent from mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already active or periodic inquiry runs */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual scan is driven asynchronously by discov_update */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_START_DISCOVERY handler: thin wrapper around the shared
 * start_discovery_internal() implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

/* MGMT_OP_START_LIMITED_DISCOVERY handler: same shared implementation,
 * but start_discovery_internal() sets hdev->discovery.limited for it.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

4550 4551
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
4552
{
4553 4554
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
4555
}
4556

/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs
 * appended to the command. Validates the variable-length payload before
 * copying the UUID list and kicking the discovery work item.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap so that sizeof(*cp) + uuid_count * 16 cannot exceed/wrap u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload length must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Completion hook for MGMT_OP_STOP_DISCOVERY: send the pending response
 * and wake the suspend machinery if it was waiting for discovery to
 * pause.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}

/* MGMT_OP_STOP_DISCOVERY handler: requests that an active discovery of
 * the matching type be stopped. The response is sent from
 * mgmt_stop_discovery_complete() once the work item has run.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of a
 * discovered device is already known. Known names drop the inquiry cache
 * entry from the resolve list; unknown ones are queued for name
 * resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		/* Re-sort the entry within the name-resolve list */
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_BLOCK_DEVICE handler: add an address to the blacklist and
 * broadcast a Device Blocked event to other mgmt sockets on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	/* Notify other sockets; the issuing socket (sk) is skipped */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the blacklist
 * and broadcast a Device Unblocked event to other mgmt sockets on
 * success. Mirrors block_device().
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* Not on the list: report invalid parameters */
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) and refresh the EIR data so the new DID is
 * advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID into the EIR record */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

/* HCI request completion callback used when re-enabling instance
 * advertising; only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}

/* Completion hook for the Set Advertising HCI request: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands, service the suspend notifier, and re-enable
 * instance advertising if the global setting was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into mgmt flags */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance selected - fall back to the first one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}

/* MGMT_OP_SET_ADVERTISING handler. val 0x00 disables, 0x01 enables, and
 * 0x02 enables "connectable" advertising. Depending on current state the
 * change is either applied purely in flags (with a direct response) or
 * driven through an HCI request completed by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No changes while advertising is paused for suspend */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_SET_STATIC_ADDRESS handler: set the LE static random address.
 * Only allowed while the controller is powered off; BDADDR_ANY disables
 * the static address again.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* MGMT_OP_SET_SCAN_PARAMS handler: update the LE scan interval and
 * window (each must be in 0x0004-0x4000 and window <= interval), then
 * restart background scanning so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}

/* Completion hook for the Set Fast Connectable HCI request: on success
 * update the HCI_FAST_CONNECTABLE flag and notify userspace of the new
 * settings; on failure report the mapped error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* cmd->param holds the mgmt_mode copied at submit time */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle the fast-connectable
 * page-scan parameters. While powered off only the flag is flipped;
 * while powered an HCI request is issued and the result handled in
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and a controller of at least Bluetooth 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested value already in effect: just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* Completion hook for the Set BR/EDR HCI request: on failure roll back
 * the optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success confirm the new settings to userspace.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Set BR/EDR command.
 *
 * Only valid on dual-mode (BR/EDR + LE capable) controllers with LE
 * enabled. When the controller is powered off the flag is simply
 * toggled; when powered, disabling BR/EDR is rejected and enabling it
 * requires issuing HCI commands, completed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR also clears every setting that
			 * only makes sense on the BR/EDR transport.
			 */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5408 5409
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
5410
	struct mgmt_pending_cmd *cmd;
5411 5412
	struct mgmt_mode *cp;

5413
	bt_dev_dbg(hdev, "status %u", status);
5414 5415 5416

	hci_dev_lock(hdev);

5417
	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5418 5419 5420 5421
	if (!cmd)
		goto unlock;

	if (status) {
5422 5423
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
5424 5425 5426 5427 5428 5429 5430
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
5431 5432
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5433 5434
		break;
	case 0x01:
5435
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5436
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5437 5438
		break;
	case 0x02:
5439 5440
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

5453 5454 5455 5456
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
5457
	struct mgmt_pending_cmd *cmd;
5458
	struct hci_request req;
5459
	u8 val;
5460 5461
	int err;

5462
	bt_dev_dbg(hdev, "sock %p", sk);
5463

5464
	if (!lmp_sc_capable(hdev) &&
5465
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5466 5467
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);
5468

5469
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5470
	    lmp_sc_capable(hdev) &&
5471
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5472 5473
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);
5474

5475
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5476
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5477 5478 5479 5480
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

5481
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5482
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5483 5484
		bool changed;

5485
		if (cp->val) {
5486 5487
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
5488
			if (cp->val == 0x02)
5489
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
5490
			else
5491
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5492
		} else {
5493 5494
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
5495
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5496
		}
5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

5508
	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5509 5510
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
5511 5512 5513
		goto failed;
	}

5514 5515
	val = !!cp->val;

5516 5517
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5518 5519 5520 5521 5522 5523 5524 5525 5526 5527
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

5528 5529 5530
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
5531 5532 5533 5534 5535 5536 5537 5538 5539 5540
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

5541 5542 5543 5544
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
5545
	bool changed, use_changed;
5546 5547
	int err;

5548
	bt_dev_dbg(hdev, "sock %p", sk);
5549

5550
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5551 5552
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5553 5554 5555 5556

	hci_dev_lock(hdev);

	if (cp->val)
5557
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5558
	else
5559 5560
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
5561

5562
	if (cp->val == 0x02)
5563 5564
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
5565
	else
5566 5567
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);
5568 5569

	if (hdev_is_powered(hdev) && use_changed &&
5570
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5571 5572 5573 5574 5575
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587
	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5588 5589 5590 5591 5592 5593 5594
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

5595
	bt_dev_dbg(hdev, "sock %p", sk);
5596 5597

	if (!lmp_le_capable(hdev))
5598 5599
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);
5600

5601
	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5602 5603
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);
5604 5605

	if (hdev_is_powered(hdev))
5606 5607
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);
5608 5609 5610

	hci_dev_lock(hdev);

5611 5612 5613
	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
5614
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5615

5616
	if (cp->privacy) {
5617
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5618
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5619
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5620
		hci_adv_instances_set_rpa_expired(hdev, true);
5621 5622 5623 5624
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5625
	} else {
5626
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5627
		memset(hdev->irk, 0, sizeof(hdev->irk));
5628
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5629
		hci_adv_instances_set_rpa_expired(hdev, false);
5630
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

/* Handle the MGMT Load IRKs command.
 *
 * Replaces the complete set of stored Identity Resolving Keys with the
 * list supplied by user space. Blocked keys are skipped with a warning.
 * Receiving this command also implies user space can handle IRKs, so
 * HCI_RPA_RESOLVING is set.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored key list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5732 5733 5734 5735
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5749 5750
}

5751
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5752
			       void *cp_data, u16 len)
5753 5754
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
5755 5756
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
5757
	u16 key_count, expected_len;
5758
	int i, err;
5759

5760
	bt_dev_dbg(hdev, "sock %p", sk);
5761 5762

	if (!lmp_le_capable(hdev))
5763 5764
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
5765

5766
	key_count = __le16_to_cpu(cp->key_count);
5767
	if (key_count > max_key_count) {
5768 5769
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
5770 5771
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5772
	}
5773

5774
	expected_len = struct_size(cp, keys, key_count);
5775
	if (expected_len != len) {
5776 5777
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
5778 5779
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5780 5781
	}

5782
	bt_dev_dbg(hdev, "key_count %u", key_count);
5783

5784 5785 5786
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

5787
		if (!ltk_is_valid(key))
5788 5789 5790
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
5791 5792
	}

5793 5794 5795 5796 5797 5798
	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
5799
		u8 type, authenticated;
5800

5801 5802 5803 5804 5805 5806 5807 5808
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

5809 5810
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
5811
			authenticated = 0x00;
5812
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5813 5814
			break;
		case MGMT_LTK_AUTHENTICATED:
5815
			authenticated = 0x01;
5816 5817 5818 5819 5820
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
5821
			break;
5822 5823 5824
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
5825
			break;
5826 5827 5828
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
5829
			/* fall through */
5830 5831 5832
		default:
			continue;
		}
5833

5834 5835 5836
		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
5837 5838
	}

5839
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5840 5841
			   NULL, 0);

5842 5843
	hci_dev_unlock(hdev);

5844
	return err;
5845 5846
}

5847
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5848 5849
{
	struct hci_conn *conn = cmd->user_data;
5850
	struct mgmt_rp_get_conn_info rp;
5851
	int err;
5852

5853
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5854

5855
	if (status == MGMT_STATUS_SUCCESS) {
5856
		rp.rssi = conn->rssi;
5857 5858 5859 5860 5861 5862
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
5863 5864
	}

5865 5866
	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));
5867 5868

	hci_conn_drop(conn);
5869
	hci_conn_put(conn);
5870 5871

	return err;
5872 5873
}

5874 5875
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
5876 5877
{
	struct hci_cp_read_rssi *cp;
5878
	struct mgmt_pending_cmd *cmd;
5879 5880
	struct hci_conn *conn;
	u16 handle;
5881
	u8 status;
5882

5883
	bt_dev_dbg(hdev, "status 0x%02x", hci_status);
5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5899 5900 5901
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
5902 5903 5904
	}

	if (!cp) {
5905
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
5906 5907 5908 5909 5910 5911
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
5912 5913
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
5914 5915 5916
		goto unlock;
	}

5917
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5918 5919
	if (!cmd)
		goto unlock;
5920

5921 5922
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Get Connection Information command.
 *
 * Replies with cached RSSI/TX power values when they are recent enough;
 * otherwise issues Read RSSI (and, when needed, Read TX Power) HCI
 * commands and defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold the connection until the completion handler runs */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6049
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6050
{
6051
	struct hci_conn *conn = cmd->user_data;
6052
	struct mgmt_rp_get_clock_info rp;
6053
	struct hci_dev *hdev;
6054
	int err;
6055 6056

	memset(&rp, 0, sizeof(rp));
6057
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
6074 6075
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));
6076 6077 6078 6079 6080

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}
6081 6082

	return err;
6083 6084
}

6085
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6086
{
6087
	struct hci_cp_read_clock *hci_cp;
6088
	struct mgmt_pending_cmd *cmd;
6089 6090
	struct hci_conn *conn;

6091
	bt_dev_dbg(hdev, "status %u", status);
6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

6106
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6107 6108 6109
	if (!cmd)
		goto unlock;

6110
	cmd->cmd_complete(cmd, mgmt_status(status));
6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Get Clock Information command.
 *
 * Reads the local clock and, when a BR/EDR connection address was
 * given, additionally the piconet clock for that connection. The reply
 * is deferred to clock_info_cmd_complete() via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the completion handler runs */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
6211
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
6228 6229 6230 6231 6232
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
6233 6234
		break;
	case HCI_AUTO_CONN_REPORT:
6235 6236 6237 6238
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
6239 6240 6241
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
6242
		if (!is_connected(hdev, addr, addr_type))
6243 6244 6245 6246 6247 6248
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

6249 6250
	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);
6251 6252 6253 6254

	return 0;
}

6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

6267 6268 6269 6270 6271
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
6272
	struct hci_conn_params *params;
6273
	int err;
6274
	u32 current_flags = 0;
6275

6276
	bt_dev_dbg(hdev, "sock %p", sk);
6277

6278
	if (!bdaddr_type_is_valid(cp->addr.type) ||
6279
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6280 6281 6282
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
6283

6284
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6285 6286 6287
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
6288 6289 6290

	hci_dev_lock(hdev);

6291
	if (cp->addr.type == BDADDR_BREDR) {
6292
		/* Only incoming connections action is supported for now */
6293
		if (cp->action != 0x01) {
6294 6295 6296 6297
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6298 6299 6300
			goto unlock;
		}

6301 6302 6303
		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
6304 6305
		if (err)
			goto unlock;
6306

6307
		hci_req_update_scan(hdev);
6308

6309 6310 6311
		goto added;
	}

6312
	addr_type = le_addr_type(cp->addr.type);
6313

6314
	if (cp->action == 0x02)
6315
		auto_conn = HCI_AUTO_CONN_ALWAYS;
6316 6317
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
6318
	else
6319
		auto_conn = HCI_AUTO_CONN_REPORT;
6320

6321 6322 6323 6324 6325 6326
	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6327 6328 6329
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
6330 6331 6332
		goto unlock;
	}

6333 6334 6335
	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
6336
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6337
				auto_conn) < 0) {
6338 6339 6340
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
6341
		goto unlock;
6342 6343 6344 6345 6346
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
6347 6348
	}

6349 6350
	hci_update_background_scan(hdev);

6351
added:
6352
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6353 6354
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);
6355

6356 6357 6358
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
6359 6360 6361 6362 6363 6364

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375
/* Emit a Device Removed management event for @bdaddr/@type.
 * @sk is the socket that triggered the removal (passed through to
 * mgmt_event - presumably so the originator is skipped; confirm against
 * mgmt_util).
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed evt = {
		.addr.type = type,
	};

	bacpy(&evt.addr.bdaddr, bdaddr);
	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &evt, sizeof(evt), sk);
}

6376 6377 6378 6379 6380 6381
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * Removes a single device from the BR/EDR whitelist or from the LE
 * connection parameter list, or - when BDADDR_ANY is passed - flushes
 * all whitelist entries and all removable LE connection parameters.
 * Replies to @sk with the affected address on success.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

6382
	bt_dev_dbg(hdev, "sock %p", sk);
6383 6384 6385 6386

	hci_dev_lock(hdev);

	/* A specific address was given: remove exactly that device. */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6387
		struct hci_conn_params *params;
6388 6389
		u8 addr_type;

6390
		if (!bdaddr_type_is_valid(cp->addr.type)) {
6391 6392 6393 6394
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6395 6396 6397
			goto unlock;
		}

6398 6399 6400 6401 6402
		/* BR/EDR devices live on the whitelist; LE handling below. */
		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
6403 6404 6405 6406 6407
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
6408 6409 6410
				goto unlock;
			}

6411
			hci_req_update_scan(hdev);
6412

6413 6414 6415 6416 6417
			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

6418
		addr_type = le_addr_type(cp->addr.type);
6419

6420 6421 6422 6423 6424 6425
		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6426 6427 6428 6429
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6430 6431 6432
			goto unlock;
		}

6433 6434 6435
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
6436 6437 6438 6439
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6440 6441 6442
			goto unlock;
		}

6443 6444
		/* NOTE(review): DISABLED/EXPLICIT entries appear to be
		 * kernel-internal state rather than devices added via Add
		 * Device, hence the rejection - confirm against add_device().
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6445 6446 6447 6448
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6449 6450 6451
			goto unlock;
		}

6452
		list_del(&params->action);
6453 6454
		list_del(&params->list);
		kfree(params);
6455
		hci_update_background_scan(hdev);
6456 6457

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6458
	} else {
6459
		/* BDADDR_ANY: flush the whole whitelist and all removable
		 * LE connection parameters.
		 */
		struct hci_conn_params *p, *tmp;
6460
		struct bdaddr_list *b, *btmp;
6461

6462
		if (cp->addr.type) {
6463 6464 6465 6466
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
6467 6468 6469
			goto unlock;
		}

6470 6471 6472 6473 6474 6475
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

6476
		hci_req_update_scan(hdev);
6477

6478 6479 6480 6481
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
6482 6483 6484 6485
			/* Entries with a pending explicit connect are kept
			 * but downgraded to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
6486 6487 6488 6489 6490
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

6491
		bt_dev_dbg(hdev, "All LE connection parameters were removed");
6492

6493
		hci_update_background_scan(hdev);
6494 6495
	}

6496
complete:
6497 6498 6499
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
6500 6501 6502 6503 6504
unlock:
	hci_dev_unlock(hdev);
	return err;
}

6505 6506 6507 6508
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Replaces the stored LE connection parameters with the list supplied by
 * userspace. Individual invalid entries are logged and skipped rather
 * than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
6509 6510
	/* Upper bound on the entry count that still fits a u16 length. */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
6511 6512 6513 6514
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
6515 6516
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);
6517 6518

	param_count = __le16_to_cpu(cp->param_count);
6519
	if (param_count > max_param_count) {
6520 6521
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
6522 6523
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
6524
	}
6525

6526
	expected_len = struct_size(cp, params, param_count);
6527
	if (expected_len != len) {
6528 6529
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
6530 6531
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
6532 6533
	}

6534
	bt_dev_dbg(hdev, "param_count %u", param_count);
6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545

	hci_dev_lock(hdev);

	/* Flush previously disabled entries before loading the new set. */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

6546 6547
		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);
6548 6549 6550 6551 6552 6553

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
6554
			bt_dev_err(hdev, "ignoring invalid connection parameters");
6555 6556 6557 6558 6559 6560 6561 6562
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

6563 6564
		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);
6565 6566

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6567
			bt_dev_err(hdev, "ignoring invalid connection parameters");
6568 6569 6570 6571 6572 6573
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
6574
			bt_dev_err(hdev, "failed to add connection parameters");
6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

6586 6587
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
6588 6589
}

6590 6591 6592 6593 6594 6595 6596
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Marks the controller as configured (or not) by an external entity.
 * Only valid while powered off and only on controllers that declare
 * HCI_QUIRK_EXTERNAL_CONFIG.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

6597
	bt_dev_dbg(hdev, "sock %p", sk);
6598 6599

	if (hdev_is_powered(hdev))
6600 6601
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);
6602 6603

	if (cp->config != 0x00 && cp->config != 0x01)
6604 6605
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);
6606 6607

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6608 6609
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);
6610 6611 6612 6613

	hci_dev_lock(hdev);

	if (cp->config)
6614
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6615
	else
6616
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6617 6618 6619 6620 6621 6622 6623 6624

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

6625 6626
	err = new_options(hdev, sk);

6627
	/* HCI_UNCONFIGURED no longer matches the real configuration state:
	 * remove the index and re-announce it under its new identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6628
		mgmt_index_removed(hdev);
6629

6630
		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6631 6632
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6633 6634 6635

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
6636
			set_bit(HCI_RAW, &hdev->flags);
6637 6638
			mgmt_index_added(hdev);
		}
6639 6640 6641 6642 6643 6644 6645
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6646 6647 6648 6649 6650 6651 6652
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Programs a public address on a controller whose driver exposes a
 * set_bdaddr hook. Only allowed while powered off; a non-zero address
 * is required.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

6653
	bt_dev_dbg(hdev, "sock %p", sk);
6654 6655

	if (hdev_is_powered(hdev))
6656 6657
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
6658 6659

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6660 6661
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);
6662 6663

	if (!hdev->set_bdaddr)
6664 6665
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

6679
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6680 6681 6682 6683 6684
		err = new_options(hdev, sk);

	/* Setting the address may complete configuration: re-register the
	 * index as a configured controller and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

6685
		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6686

6687 6688
		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6689 6690 6691 6692 6693 6694 6695 6696 6697

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6698 6699 6700 6701 6702 6703 6704 6705 6706 6707
/* Completion callback for the HCI Read Local OOB (Ext) Data request
 * issued by read_local_ssp_oob_req(). Packages the controller's hash
 * and randomizer values into EIR fields, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and, on success, also emits
 * a Local OOB Data Updated event to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

6708
	bt_dev_dbg(hdev, "status %u", status);
6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 hash/randomizer available. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 always, P-192 unless SC-only. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

6837 6838 6839 6840 6841 6842 6843
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Builds the EIR-formatted local out-of-band pairing data for either
 * BR/EDR or LE. The BR/EDR + SSP case completes asynchronously via
 * read_local_ssp_oob_req(); every other case replies synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
6844
	u8 status, flags, role, addr[7], hash[16], rand[16];
6845 6846
	int err;

6847
	bt_dev_dbg(hdev, "sock %p", sk);
6848

6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872
	/* First pass: determine the status and worst-case EIR length so
	 * the response can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
6873 6874 6875 6876
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
6877
	if (!rp)
6878
		return -ENOMEM;
6879

6880 6881 6882
	if (status)
		goto complete;

6883
	hci_dev_lock(hdev);
6884 6885 6886 6887

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
6901 6902
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6903 6904
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
6905
			hci_dev_unlock(hdev);
6906 6907
			status = MGMT_STATUS_FAILED;
			goto complete;
6908 6909
		}

6910 6911 6912 6913 6914 6915 6916 6917 6918 6919
		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
6920
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6921 6922 6923 6924 6925 6926 6927 6928 6929
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947
			/* addr[6] flags the address type: 0x01 = random. */
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

6948 6949 6950 6951
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));
6952

6953 6954 6955 6956
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}
6957

6958
		flags = mgmt_get_adv_discov_flags(hdev);
6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

6970 6971
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

6972 6973 6974
	status = MGMT_STATUS_SUCCESS;

complete:
6975 6976 6977
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

6978
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6979 6980
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
6981 6982 6983 6984 6985
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);
6986

6987
done:
6988 6989 6990 6991 6992
	kfree(rp);

	return err;
}

6993 6994 6995 6996 6997 6998 6999 7000
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7001
	flags |= MGMT_ADV_FLAG_APPEARANCE;
7002
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7003

7004 7005 7006 7007 7008
	/* In extended adv TX_POWER returned from Set Adv Param
	 * will be always valid.
	 */
	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
	    ext_adv_capable(hdev))
7009 7010
		flags |= MGMT_ADV_FLAG_TX_POWER;

7011 7012 7013 7014 7015 7016 7017 7018 7019 7020
	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;

		if (hdev->le_features[1] & HCI_LE_PHY_2M)
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

7021 7022 7023
	return flags;
}

7024 7025 7026 7027 7028
/* MGMT_OP_READ_ADV_FEATURES handler: report the supported advertising
 * flags, the data size limits and the list of configured instance IDs.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	struct adv_info *adv;
	size_t rp_len;
	u8 *inst_ptr;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per configured instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	rp->supported_flags = cpu_to_le32(get_supported_adv_flags(hdev));
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	inst_ptr = rp->instance;
	list_for_each_entry(adv, &hdev->adv_instances, list)
		*inst_ptr++ = adv->instance;

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

7073 7074 7075 7076 7077 7078 7079 7080 7081
/* Size in bytes of the local name AD field as append_local_name() would
 * emit it (written into a throwaway buffer just to measure it).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 scratch[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, scratch, 0);
}

/* Maximum number of bytes of user-supplied advertising or scan response
 * data, after reserving room for the fields the kernel appends itself
 * based on @adv_flags.
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 reserved = 0;

	if (is_adv_data) {
		/* Auto-generated Flags field: 3 bytes. */
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			reserved += 3;

		/* Auto-generated TX power field: 3 bytes. */
		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			reserved += 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			reserved += calculate_name_len(hdev);

		/* Appearance field: 4 bytes. */
		if (adv_flags & MGMT_ADV_FLAG_APPEARANCE)
			reserved += 4;
	}

	return HCI_MAX_AD_LENGTH - reserved;
}

/* True when the kernel generates the Flags field itself for these
 * advertising flags, so user data must not contain one.
 */
static bool flags_managed(u32 adv_flags)
{
	return (adv_flags & (MGMT_ADV_FLAG_DISCOV |
			     MGMT_ADV_FLAG_LIMITED_DISCOV |
			     MGMT_ADV_FLAG_MANAGED_FLAGS)) != 0;
}

/* True when the kernel emits the TX power field itself. */
static bool tx_power_managed(u32 adv_flags)
{
	return (adv_flags & MGMT_ADV_FLAG_TX_POWER) != 0;
}

/* True when the kernel appends the local name itself. */
static bool name_managed(u32 adv_flags)
{
	return (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) != 0;
}

/* True when the kernel appends the appearance field itself. */
static bool appearance_managed(u32 adv_flags)
{
	return (adv_flags & MGMT_ADV_FLAG_APPEARANCE) != 0;
}

7126 7127
/* Validate user supplied advertising or scan response TLV data.
 *
 * Rejects data that exceeds the space left after the kernel's
 * auto-generated fields, that contains field types the kernel manages
 * itself (flags, TX power, name, appearance - depending on @adv_flags),
 * or that is not a well-formed sequence of length-prefixed fields.
 *
 * Fix: the original read data[i + 1] before verifying the field fits in
 * the buffer, so a trailing (in particular zero-length) field caused a
 * one-byte out-of-bounds read of user-adjacent memory. The bounds check
 * is now done before any access to the type byte, and zero-length
 * fields (which carry no type byte at all) are skipped.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero-length field has no type byte; skip it without
		 * touching data[i + 1], which may be past the buffer.
		 */
		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this first also
		 * guarantees that data[i + 1] below is in bounds.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}

/* Completion callback for the Add Advertising HCI request.
 *
 * On failure every still-pending instance is rolled back and removal
 * events are emitted; the pending mgmt command (if any) is answered
 * with the mapped status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
7172
	struct mgmt_cp_add_advertising *cp;
7173
	struct mgmt_rp_add_advertising rp;
7174 7175
	struct adv_info *adv_instance, *n;
	u8 instance;
7176

7177
	bt_dev_dbg(hdev, "status %d", status);
7178 7179 7180 7181 7182

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197
	/* Commit pending instances on success; drop them on failure. */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
7198
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7199 7200 7201 7202 7203
	}

	if (!cmd)
		goto unlock;

7204 7205
	cp = cmd->param;
	rp.instance = cp->instance;
7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the supplied flags and TLV data, stores the advertising
 * instance and - when the controller is powered and not in legacy
 * HCI_ADVERTISING mode - schedules it via an HCI request that is
 * completed by add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
7226
	u32 supported_flags, phy_flags;
7227
	u8 status;
7228 7229 7230 7231
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
7232 7233 7234 7235
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

7236
	bt_dev_dbg(hdev, "sock %p", sk);
7237 7238 7239 7240 7241 7242

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

7243
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7244 7245 7246 7247
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7248 7249 7250
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

7251
	flags = __le32_to_cpu(cp->flags);
7252
	timeout = __le16_to_cpu(cp->timeout);
7253
	duration = __le16_to_cpu(cp->duration);
7254

7255
	/* The current implementation only supports a subset of the specified
7256
	 * flags. Also need to check mutual exclusiveness of sec flags.
7257 7258
	 */
	supported_flags = get_supported_adv_flags(hdev);
7259 7260 7261
	/* (phy_flags ^ (phy_flags & -phy_flags)) is non-zero when more than
	 * one secondary-PHY bit is set, i.e. not a power of two.
	 */
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7262 7263 7264 7265 7266
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

7267 7268 7269 7270 7271 7272
	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

7273
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7274
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7275 7276 7277 7278 7279 7280
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

7281 7282
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7283
			       cp->scan_rsp_len, false)) {
7284 7285 7286 7287 7288
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

7289 7290 7291 7292 7293 7294 7295 7296 7297 7298
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}
7299

7300 7301 7302 7303
	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
7304
		mgmt_advertising_added(sk, hdev, cp->instance);
7305

7306 7307 7308 7309 7310 7311 7312
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);
7313

7314 7315 7316 7317 7318 7319 7320 7321 7322
		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}
7323

7324 7325 7326
	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
7327 7328
	 */
	if (!hdev_is_powered(hdev) ||
7329 7330 7331
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

7349
	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7350 7351 7352

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);
7353

7354 7355 7356
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
7357
		mgmt_pending_remove(cmd);
7358
	}
7359 7360 7361 7362 7363 7364 7365

unlock:
	hci_dev_unlock(hdev);

	return err;
}

7366 7367 7368 7369
/* Completion callback for the Remove Advertising HCI request: answer
 * the pending mgmt command, if one is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		struct mgmt_cp_remove_advertising *cp = cmd->param;
		struct mgmt_rp_remove_advertising rp = {
			.instance = cp->instance,
		};

		/* A failure status here only means that we failed to disable
		 * advertising. Otherwise, the advertising instance has been
		 * removed, so report success.
		 */
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes the requested advertising instance (instance 0 skips the
 * lookup below and is passed straight to hci_req_clear_adv_instance)
 * and, if no instances remain, disables advertising on the controller.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Only one advertising-related operation may be in flight. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* If the last instance was just removed, stop advertising. */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7469 7470 7471 7472 7473 7474 7475 7476
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;
	int err;

7477
	bt_dev_dbg(hdev, "sock %p", sk);
7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
7499 7500
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7501 7502 7503 7504 7505 7506 7507

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	return err;
}

/* Dispatch table for mgmt commands, indexed by opcode. Each entry lists
 * the handler, the expected parameter size (a minimum when the entry is
 * marked HCI_MGMT_VAR_LEN) and the handler flags.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
/* Send the appropriate "index added" mgmt events when a new controller
 * is registered with the stack.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	/* The extended event is sent in addition to the legacy one. */
	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
/* Send the appropriate "index removed" mgmt events when a controller is
 * unregistered, after failing any still-pending mgmt commands.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every pending command with Invalid Index since the
		 * controller is going away.
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	/* The extended event is sent in addition to the legacy one. */
	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-queue every connection parameter entry onto the pending
	 * connect/report lists according to its auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
/* Called when powering on the controller has finished; err carries the
 * result. Completes any pending Set Powered commands and emits New
 * Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
/* Called when the controller has been powered off. Completes pending
 * commands with an appropriate error and notifies mgmt listeners.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a cleared class of device if one was previously set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
7778
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7779
{
7780
	struct mgmt_pending_cmd *cmd;
7781 7782
	u8 status;

7783
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7784
	if (!cmd)
7785
		return;
7786 7787 7788 7789 7790 7791

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

7792
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7793 7794 7795 7796

	mgmt_pending_remove(cmd);
}

7797 7798
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
7799
{
7800
	struct mgmt_ev_new_link_key ev;
7801

7802
	memset(&ev, 0, sizeof(ev));
7803

7804
	ev.store_hint = persistent;
7805
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7806
	ev.key.addr.type = BDADDR_BREDR;
7807
	ev.key.type = key->type;
7808
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7809
	ev.key.pin_len = key->pin_len;
7810

7811
	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7812
}
7813

7814 7815
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
7829 7830 7831 7832

	return MGMT_LTK_UNAUTHENTICATED;
}

/* Emit a New Long Term Key event for a key created during SMP pairing. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7876
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7877 7878 7879 7880 7881
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

7882
	ev.store_hint = persistent;
7883

7884 7885 7886 7887 7888 7889 7890 7891
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

/* Emit a New Signature Resolving Key event. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7922
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7923 7924
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
7925 7926 7927
{
	struct mgmt_ev_new_conn_param ev;

7928 7929 7930
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

7931 7932 7933
	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7934
	ev.store_hint = store_hint;
7935 7936 7937 7938 7939 7940 7941 7942
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

/* Emit a Device Connected event, appending name/class (BR/EDR) or the
 * advertising data (LE) as EIR-formatted data.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* buf holds the fixed-size event header plus variable EIR data */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when one is set. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7980
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7981 7982 7983
{
	struct sock **sk = data;

7984
	cmd->cmd_complete(cmd, 0);
7985 7986 7987 7988

	*sk = cmd->sk;
	sock_hold(*sk);

7989
	mgmt_pending_remove(cmd);
7990 7991
}

7992
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7993
{
7994
	struct hci_dev *hdev = data;
7995
	struct mgmt_cp_unpair_device *cp = cmd->param;
7996

7997 7998
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

7999
	cmd->cmd_complete(cmd, 0);
8000 8001 8002
	mgmt_pending_remove(cmd);
}

8003 8004
bool mgmt_powering_down(struct hci_dev *hdev)
{
8005
	struct mgmt_pending_cmd *cmd;
8006 8007
	struct mgmt_mode *cp;

8008
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8009 8010 8011 8012 8013 8014 8015 8016 8017 8018
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report disconnects for connections mgmt knew about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back the socket of a pending Disconnect
	 * command, so the event below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8055 8056
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
8057
{
8058 8059
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
8060
	struct mgmt_pending_cmd *cmd;
8061

8062 8063 8064
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

8065
	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8066
	if (!cmd)
8067
		return;
8068

8069 8070 8071 8072 8073 8074 8075 8076
	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

8077
	cmd->cmd_complete(cmd, mgmt_status(status));
8078
	mgmt_pending_remove(cmd);
8079
}
8080

/* Emit a Connect Failed event; may also trigger the deferred power off
 * when this was the last connection standing in its way.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8101
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8102 8103 8104
{
	struct mgmt_ev_pin_code_request ev;

8105
	bacpy(&ev.addr.bdaddr, bdaddr);
8106
	ev.addr.type = BDADDR_BREDR;
8107
	ev.secure = secure;
8108

8109
	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8110 8111
}

/* Complete a pending PIN Code Reply command with the mapped HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Emit a User Confirmation Request event asking userspace to confirm
 * the displayed numeric value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

/* Emit a User Passkey Request event asking userspace to supply a
 * passkey for the pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Common completion helper for the user confirm/passkey reply commands:
 * finish the pending command identified by opcode with the mapped HCI
 * status. Returns -ENOENT if no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

/* Complete a pending User Confirmation Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Complete a pending User Confirmation Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8215 8216 8217 8218 8219 8220
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

8221
	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8222 8223 8224 8225 8226 8227 8228 8229 8230

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

/* Emit an Authentication Failed event and complete any pending pairing
 * command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the pairing command's own socket when broadcasting. */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
/* Handle completion of enabling/disabling link level authentication,
 * syncing HCI_LINK_SECURITY with the controller's HCI_AUTH flag and
 * completing pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt setting. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
8279
static void clear_eir(struct hci_request *req)
8280
{
8281
	struct hci_dev *hdev = req->hdev;
8282 8283
	struct hci_cp_write_eir cp;

8284
	if (!lmp_ext_inq_capable(hdev))
8285
		return;
8286

8287 8288
	memset(hdev->eir, 0, sizeof(hdev->eir));

8289 8290
	memset(&cp, 0, sizeof(cp));

8291
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8292 8293
}

/* Handle completion of enabling/disabling Simple Secure Pairing,
 * updating the HCI_SSP_ENABLED/HCI_HS_ENABLED flags, completing pending
 * Set SSP commands and refreshing the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the flag if enabling failed; HS depends on
		 * SSP and is cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8347
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8348 8349 8350 8351 8352 8353 8354 8355 8356
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

/* Handle completion of a class of device update: complete the commands
 * that can change it and, on success, broadcast the new class.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* sk_lookup took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
8376
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8377 8378
{
	struct mgmt_cp_set_local_name ev;
8379
	struct mgmt_pending_cmd *cmd;
8380

8381
	if (status)
8382
		return;
8383 8384 8385

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8386
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8387

8388
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8389 8390
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8391

8392 8393 8394
		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
8395
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
8396
			return;
8397
	}
8398

8399 8400
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
8401
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
8402
}
8403

8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

8416 8417
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
8435
				memcpy(uuid, bluetooth_base_uuid, 16);
8436 8437 8438 8439 8440 8441 8442 8443 8444
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
8445
				memcpy(uuid, bluetooth_base_uuid, 16);
8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

8468 8469 8470
	return false;
}

8471 8472 8473
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
8474
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8475 8476 8477 8478 8479 8480 8481
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

8482
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8483 8484 8485
			   DISCOV_LE_RESTART_DELAY);
}

8486 8487
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8488
{
8489 8490 8491 8492 8493
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
8494 8495 8496
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
8497 8498
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8499 8500 8501
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8502
		return  false;
8503

8504 8505 8506
	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
8507
		 */
8508 8509 8510 8511 8512 8513
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
8514
	}
8515

8516 8517
	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
8518
	 */
8519 8520 8521 8522 8523 8524 8525 8526
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}
8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549

	return true;
}

/* Report a discovered device to management sockets as a Device Found
 * event. Applies the discovery result filter and, when limited discovery
 * is active, drops devices not advertising limited discoverable mode.
 * The EIR payload of the event is the advertising data, optionally with
 * an appended Class of Device field, followed by the scan response.
 *
 * Fix: the local flags pointer used for the LE limited-discoverable check
 * shadowed the u32 @flags parameter; renamed to adv_flags.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev) &&
	    (link_type == ACL_LINK ||
	     (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))))
		return;

	/* We are using service discovery */
	if (hdev->discovery.result_filtering &&
	    !is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, scan_rsp_len))
		return;

	if (hdev->discovery.limited) {
		/* Only pass devices advertising limited discoverable mode:
		 * CoD "limited" bit for BR/EDR, LE_AD_LIMITED in the flags
		 * AD field for LE.
		 */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *adv_flags = eir_get_data(eir, eir_len, EIR_FLAGS,
						     NULL);

			if (!adv_flags || !(adv_flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	/* Copy EIR or advertising data into event */
	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append a CoD field unless one is already present in the data */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Append scan response data to event */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8612

8613 8614
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8615
{
8616 8617 8618
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;
8619

8620
	ev = (struct mgmt_ev_device_found *) buf;
8621

8622 8623 8624
	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
8625
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8626 8627 8628
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8629
				  name_len);
8630

8631
	ev->eir_len = cpu_to_le16(eir_len);
8632

8633
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8634
}
8635

8636
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8637
{
8638
	struct mgmt_ev_discovering ev;
8639

8640
	bt_dev_dbg(hdev, "discovering %u", discovering);
8641

8642 8643 8644 8645
	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

8646
	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8647
}
8648

8649 8650 8651 8652
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
8653
	.hdev_init	= mgmt_init_hdev,
8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664
};

/* Register the management control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the management control channel on module removal. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}