/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	13
/* Opcodes that a trusted (privileged) management socket may issue */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};

/* Events delivered to trusted management sockets */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};

/* Read-only subset of opcodes that untrusted sockets may issue */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};

/* Subset of events that untrusted sockets are allowed to receive */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};

172
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
173

174 175 176
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

177 178 179 180 181 182 183 184
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
185
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Convert an HCI status code to its MGMT counterpart; codes outside
 * the conversion table map to a generic failure.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}

/* Broadcast an index-related event on the control channel to all
 * sockets that have @flag set (no socket is skipped).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

/* Broadcast an event on the control channel limited to sockets that
 * have @flag set, optionally skipping @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

/* Broadcast an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

271 272 273 274 275 276 277 278
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

/* Fill a mgmt_rp_read_version structure with the implemented
 * management interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

/* MGMT_OP_READ_VERSION handler; available without a controller index. */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

300 301
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
302 303
{
	struct mgmt_rp_read_commands *rp;
304
	u16 num_commands, num_events;
305 306 307 308 309
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

310 311 312 313 314 315 316 317
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

318 319 320 321 322 323
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

324 325
	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);
326

327 328 329 330 331
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);
332

333 334 335 336 337 338 339 340 341 342 343
		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}
344

345 346
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
347 348 349 350 351
	kfree(rp);

	return err;
}

/* MGMT_OP_READ_INDEX_LIST handler: return the indices of all
 * configured primary controllers.
 *
 * Built in two passes under hci_dev_list_lock: the first pass sizes
 * the response, the second fills it while additionally skipping
 * devices in transient states, so the final count may be smaller than
 * the allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indices. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record the indices actually visible to mgmt. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the length since the second pass may have skipped
	 * devices that the first pass counted.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reporting only primary controllers that still await configuration
 * (HCI_UNCONFIGURED set).  Same two-pass scheme under the read lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indices. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record the indices actually visible to mgmt. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the length in case the second pass skipped devices. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all primary and AMP
 * controllers with their type and bus.  Issuing this command switches
 * the socket over to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries actually visible to mgmt. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured primary, 0x01 unconfigured
		 * primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	/* Recompute the length in case the second pass skipped devices. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

/* A controller counts as configured once any externally required
 * setup has completed: vendor-specific configuration (if the quirk
 * demands it) and a valid public address (if the default is invalid).
 */
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

/* Return (little-endian) the configuration options still required
 * before the controller becomes fully configured; mirrors the checks
 * in is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

/* Notify option-event listeners that the missing configuration
 * options for @hdev changed, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

/* Complete @opcode with the current missing-options mask as payload. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

592 593 594 595
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
596
	u32 options = 0;
597 598 599 600 601 602 603

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
604

605 606 607
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

608
	if (hdev->set_bdaddr)
609 610 611 612
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);
613 614 615

	hci_dev_unlock(hdev);

616 617
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
618 619
}

/* Build the mask of settings this controller could support, derived
 * from its LMP features and driver capabilities.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings every controller supports. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced page scan (1.2+). */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is only supported if the driver can actually
	 * be configured (external config quirk or set_bdaddr hook).
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}

/* Build the mask of settings currently active on the controller by
 * sampling the corresponding device flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}

/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

/* Like pending_find() but additionally matching the user data pointer. */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

740
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
741
{
742
	struct mgmt_pending_cmd *cmd;
743 744 745 746

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
747
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
748 749 750 751 752 753 754
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
755
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
756
			return LE_AD_LIMITED;
757
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
758 759 760 761 762 763
			return LE_AD_GENERAL;
	}

	return 0;
}

764
bool mgmt_get_connectable(struct hci_dev *hdev)
765 766
{
	struct mgmt_pending_cmd *cmd;
767

768 769 770 771 772 773
	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
774

775
		return cp->val;
776 777
	}

778 779
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
780

/* Delayed work: flush the service (UUID) cache by pushing the current
 * EIR data and class of device to the controller once the cache
 * timeout expires.  Bails out if the cache flag was already cleared.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	/* EIR/class state is read under the device lock; the request is
	 * only submitted after the lock is dropped.
	 */
	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, refresh it by re-enabling advertising.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}

/* One-time per-device mgmt initialisation, triggered by the first mgmt
 * access to @hdev.  The test-and-set of HCI_MGMT makes repeated calls
 * a no-op.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}

840
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
841
				void *data, u16 data_len)
842
{
843
	struct mgmt_rp_read_info rp;
844

845
	BT_DBG("sock %p %s", sk, hdev->name);
846

847
	hci_dev_lock(hdev);
848

849 850
	memset(&rp, 0, sizeof(rp));

851
	bacpy(&rp.bdaddr, &hdev->bdaddr);
852

853
	rp.version = hdev->hci_ver;
854
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
855 856 857

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
858

859
	memcpy(rp.dev_class, hdev->dev_class, 3);
860

861
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
862
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
863

864
	hci_dev_unlock(hdev);
865

866 867
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
868 869
}

870 871 872 873 874 875 876 877 878 879 880
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

881 882 883
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
884 885 886 887
	struct mgmt_rp_read_ext_info *rp;
	char buff[512];
	u16 eir_len = 0;
	u8 name_len;
888 889 890 891 892

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

893 894 895 896
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(buff, eir_len,
					  EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);
897

898 899 900
	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(buff, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);
901

902 903 904
	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(buff, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);
905

906
	rp = kzalloc(sizeof(*rp) + eir_len, GFP_KERNEL);
907 908 909 910 911 912 913
	if (!rp)
		return -ENOMEM;

	rp->eir_len = cpu_to_le16(eir_len);
	memcpy(rp->eir, buff, eir_len);

	bacpy(&rp->bdaddr, &hdev->bdaddr);
914

915 916 917 918 919
	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
920 921 922 923 924 925 926 927 928 929 930 931

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

932 933
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
934 935 936 937 938 939 940 941 942 943 944 945
}

/* Emit an (empty) Extended Controller Information Changed event to
 * sockets that opted into extended info events, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_ext_info_changed ev;

	ev.eir_len = cpu_to_le16(0);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXT_INFO_EVENTS, skip);
}

/* Complete @opcode with the current settings mask as payload. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

/* Completion callback for the power-off cleanup request: once no
 * connections remain, cancel the delayed power-off and schedule it to
 * run immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

/* Notify trusted sockets (except @sk) that advertising instance
 * @instance was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

/* Notify trusted sockets (except @sk) that advertising instance
 * @instance was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

/* Stop a pending advertising-instance expiry timer, if one is armed. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* Queue up everything needed to quiesce the controller before power
 * off: disable page/inquiry scan, tear down advertising, stop
 * discovery and abort all connections, then run the request with
 * clean_up_hci_complete() as the completion callback.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be done (callers treat that as "power off immediately").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}

1025
/* Handler for the Set Powered management command: power the controller
 * on via the power_on work, or cleanly shut it down (disconnects,
 * scan/advertising stop) before scheduling power_off.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1080 1081
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
1082
	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1083

1084 1085
	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1086 1087
}

1088 1089 1090 1091 1092
/* Broadcast a New Settings event to every management socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1093 1094 1095 1096 1097 1098
/* Context passed to mgmt_pending_foreach() callbacks that resolve a
 * batch of pending commands.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen; held by the callback */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report, where applicable */
};

1099
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings and free it. The first command's socket is
 * stashed (with a held reference) in the cmd_lookup so the caller can
 * skip it when broadcasting New Settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* list entry already detached above, so plain free */
	mgmt_pending_free(cmd);
}

1115
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1116 1117 1118
{
	u8 *status = data;

1119
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1120 1121 1122
	mgmt_pending_remove(cmd);
}

1123
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1137
/* Default cmd_complete handler: echo the stored command parameters
 * back in the Command Complete response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

1143
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

1149 1150 1151 1152
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1153
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1154 1155 1156 1157 1158 1159 1160 1161 1162
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1163
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1164 1165 1166 1167 1168
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1169
/* Completion path for Set Discoverable: on failure revert the limited
 * discoverable flag and report the error; on success arm the
 * discoverable timeout (if configured) and confirm the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* The timeout was stored by set_discoverable(); it is only
	 * armed here, once the HCI side has actually succeeded.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1204
/* Handler for the Set Discoverable management command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable. cp->timeout (seconds) is mandatory for limited mode
 * and forbidden when disabling. The actual HCI work is deferred to
 * the discoverable_update work item; the timeout is armed in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honored while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

1328
/* Completion path for Set Connectable: report failure, or confirm the
 * new settings to the requester and broadcast them to everyone else.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1356 1357 1358 1359 1360 1361
/* Settings-only path of Set Connectable, used while powered off: flip
 * the flags (turning connectable off also drops discoverable), confirm
 * to the requester and, on change, refresh scanning and broadcast the
 * new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

1385
/* Handler for the Set Connectable management command. While powered
 * off only the flags are updated; otherwise the flags are adjusted
 * here and the HCI side is handled by the connectable_update work.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so stop any pending discoverable timeout as well.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

1442
/* Handler for the Set Bondable management command: pure flag change,
 * no HCI traffic needed except a possible advertising refresh in
 * limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1485 1486
/* Handler for the Set Link Security management command: while powered
 * off only the flag is toggled; otherwise the change is pushed to the
 * controller with HCI Write Authentication Enable.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested mode */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1554
/* Handler for the Set Secure Simple Pairing management command.
 * Disabling SSP also disables High Speed (HS depends on SSP). While
 * powered off only flags change; otherwise HCI Write SSP Mode is sent.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP drags HS down with it; report a
			 * change if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Debug mode is only meaningful with SSP on, so switch it off
	 * (best effort) when SSP is being disabled.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1635
/* Handler for the Set High Speed management command: flag-only change.
 * HS requires SSP to be enabled, and disabling HS while powered on is
 * rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with a pending SSP change, since disabling SSP
	 * also clears the HS flag.
	 */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1692
/* Completion callback for the Set LE HCI request: resolve all pending
 * Set LE commands with either the error status or the new settings,
 * then refresh advertising data if LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* Skip the socket that already got its direct response */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}

1732
/* Handler for the Set Low Energy management command: toggle the LE
 * host support. For powered controllers this sends HCI Write LE Host
 * Supported (completed in le_enable_complete); otherwise it is a pure
 * flag change.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or controller already in the requested state:
	 * only the host flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before disabling LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

1836 1837 1838 1839 1840 1841 1842 1843
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
1844
	struct mgmt_pending_cmd *cmd;
1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877
/* Little-endian byte layout of the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB); the first 12 bytes are
 * fixed for all 16/32-bit SIG-assigned UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Classify a 128-bit little-endian UUID as 16, 32 or 128 bits wide
 * depending on whether it is derived from the Bluetooth Base UUID.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	if (memcmp(uuid, bluetooth_base_uuid, 12) != 0)
		return 128;

	return get_unaligned_le32(&uuid[12]) > 0xffff ? 32 : 16;
}

1878 1879
/* Shared completion for the class/EIR-affecting commands (Add UUID,
 * Remove UUID, Set Device Class): resolve the pending command of
 * @mgmt_op with the current device class as response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1897
/* HCI request callback for add_uuid(): forward to the shared
 * class-update completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

1904
/* Handler for the Add UUID management command: record the UUID in the
 * device list and push updated class-of-device and EIR data to the
 * controller; the mgmt response is sent from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed (e.g. powered off):
		 * complete immediately with the current class.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

1962 1963 1964 1965 1966
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

1967
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1968 1969
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
1970 1971 1972 1973 1974 1975
		return true;
	}

	return false;
}

1976
/* HCI request callback for remove_uuid(): forward to the shared
 * class-update completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

1983
/* Handler for the Remove UUID management command. An all-zero UUID
 * clears the whole list; otherwise every matching entry is removed.
 * Class-of-device and EIR data are then refreshed on the controller
 * and the mgmt response is sent from remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: drop all UUIDs */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed, the actual class/EIR
		 * update is deferred until the cache expires.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2062
/* HCI request callback for set_dev_class(): forward to the shared
 * class-update completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2069
/* Handler for the Set Device Class management command: store the new
 * major/minor class and, when powered, push the resulting class (and
 * possibly refreshed EIR) to the controller; the mgmt response then
 * comes from set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while flushing the service cache work,
		 * since that work itself takes hdev's lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2140
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2141
			  u16 len)
2142
{
2143
	struct mgmt_cp_load_link_keys *cp = data;
2144 2145
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
2146
	u16 key_count, expected_len;
2147
	bool changed;
2148
	int i;
2149

2150 2151 2152
	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
2153 2154
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
2155

2156
	key_count = __le16_to_cpu(cp->key_count);
2157 2158 2159
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
2160 2161
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2162
	}
2163

2164 2165
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
2166
	if (expected_len != len) {
2167
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2168
		       expected_len, len);
2169 2170
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2171 2172
	}

2173
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2174 2175
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2176

2177
	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2178
	       key_count);
2179

2180 2181 2182
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

2183
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2184 2185 2186
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
2187 2188
	}

2189
	hci_dev_lock(hdev);
2190 2191 2192 2193

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
2194
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2195
	else
2196 2197
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
2198 2199 2200

	if (changed)
		new_settings(hdev, NULL);
2201

2202
	for (i = 0; i < key_count; i++) {
2203
		struct mgmt_link_key_info *key = &cp->keys[i];
2204

2205 2206 2207 2208 2209 2210
		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

2211 2212
		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
2213 2214
	}

2215
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2216

2217
	hci_dev_unlock(hdev);
2218

2219
	return 0;
2220 2221
}

2222
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2223
			   u8 addr_type, struct sock *skip_sk)
2224 2225 2226 2227 2228 2229 2230
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2231
			  skip_sk);
2232 2233
}

2234
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2235
			 u16 len)
2236
{
2237 2238
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
2239
	struct hci_conn_params *params;
2240
	struct mgmt_pending_cmd *cmd;
2241
	struct hci_conn *conn;
2242
	u8 addr_type;
2243 2244
	int err;

2245
	memset(&rp, 0, sizeof(rp));
2246 2247
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
2248

2249
	if (!bdaddr_type_is_valid(cp->addr.type))
2250 2251 2252
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2253

2254
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2255 2256 2257
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2258

2259 2260
	hci_dev_lock(hdev);

2261
	if (!hdev_is_powered(hdev)) {
2262 2263 2264
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2265 2266 2267
		goto unlock;
	}

2268
	if (cp->addr.type == BDADDR_BREDR) {
2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

2282
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2283 2284 2285 2286 2287 2288
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
2289 2290
		}

2291
		goto done;
2292
	}
2293

2294 2295 2296 2297 2298 2299
	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2300
	if (err < 0) {
2301 2302 2303
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
2304 2305 2306
		goto unlock;
	}

2307 2308 2309 2310 2311 2312
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

2313 2314 2315
	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

2316 2317 2318 2319 2320
	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

2321 2322 2323 2324 2325 2326 2327 2328 2329
	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

2330 2331 2332 2333 2334 2335 2336
	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
2337 2338 2339
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
2340
	if (!conn) {
2341 2342
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
2343
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2344 2345
		goto unlock;
	}
2346

2347
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2348
			       sizeof(*cp));
2349 2350 2351
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
2352 2353
	}

2354 2355
	cmd->cmd_complete = addr_cmd_complete;

2356
	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2357 2358 2359
	if (err < 0)
		mgmt_pending_remove(cmd);

2360
unlock:
2361
	hci_dev_unlock(hdev);
2362 2363 2364
	return err;
}

2365
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2366
		      u16 len)
2367
{
2368
	struct mgmt_cp_disconnect *cp = data;
2369
	struct mgmt_rp_disconnect rp;
2370
	struct mgmt_pending_cmd *cmd;
2371 2372 2373 2374 2375
	struct hci_conn *conn;
	int err;

	BT_DBG("");

2376 2377 2378 2379
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2380
	if (!bdaddr_type_is_valid(cp->addr.type))
2381 2382 2383
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2384

2385
	hci_dev_lock(hdev);
2386 2387

	if (!test_bit(HCI_UP, &hdev->flags)) {
2388 2389 2390
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2391 2392 2393
		goto failed;
	}

2394
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2395 2396
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2397 2398 2399
		goto failed;
	}

2400
	if (cp->addr.type == BDADDR_BREDR)
2401 2402
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
2403
	else
2404 2405
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));
2406

2407
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2408 2409 2410
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
2411 2412 2413
		goto failed;
	}

2414
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2415 2416
	if (!cmd) {
		err = -ENOMEM;
2417
		goto failed;
2418
	}
2419

2420 2421
	cmd->cmd_complete = generic_cmd_complete;

2422
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2423
	if (err < 0)
2424
		mgmt_pending_remove(cmd);
2425 2426

failed:
2427
	hci_dev_unlock(hdev);
2428 2429 2430
	return err;
}

2431
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2432 2433 2434
{
	switch (link_type) {
	case LE_LINK:
2435 2436
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
2437
			return BDADDR_LE_PUBLIC;
2438

2439
		default:
2440
			/* Fallback to LE Random address type */
2441
			return BDADDR_LE_RANDOM;
2442
		}
2443

2444
	default:
2445
		/* Fallback to BR/EDR type */
2446
		return BDADDR_BREDR;
2447 2448 2449
	}
}

2450 2451
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
2452 2453
{
	struct mgmt_rp_get_connections *rp;
2454
	struct hci_conn *c;
2455
	size_t rp_len;
2456 2457
	int err;
	u16 i;
2458 2459 2460

	BT_DBG("");

2461
	hci_dev_lock(hdev);
2462

2463
	if (!hdev_is_powered(hdev)) {
2464 2465
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
2466 2467 2468
		goto unlock;
	}

2469
	i = 0;
2470 2471
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2472
			i++;
2473 2474
	}

2475
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2476
	rp = kmalloc(rp_len, GFP_KERNEL);
2477
	if (!rp) {
2478 2479 2480 2481 2482
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
2483
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2484 2485
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
2486
		bacpy(&rp->addr[i].bdaddr, &c->dst);
2487
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2488
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2489 2490 2491 2492
			continue;
		i++;
	}

2493
	rp->conn_count = cpu_to_le16(i);
2494

2495 2496
	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2497

2498 2499
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);
2500

2501
	kfree(rp);
2502 2503

unlock:
2504
	hci_dev_unlock(hdev);
2505 2506 2507
	return err;
}

2508
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2509
				   struct mgmt_cp_pin_code_neg_reply *cp)
2510
{
2511
	struct mgmt_pending_cmd *cmd;
2512 2513
	int err;

2514
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2515
			       sizeof(*cp));
2516 2517 2518
	if (!cmd)
		return -ENOMEM;

2519
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2520
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2521 2522 2523 2524 2525 2526
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

2527
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2528
			  u16 len)
2529
{
2530
	struct hci_conn *conn;
2531
	struct mgmt_cp_pin_code_reply *cp = data;
2532
	struct hci_cp_pin_code_reply reply;
2533
	struct mgmt_pending_cmd *cmd;
2534 2535 2536 2537
	int err;

	BT_DBG("");

2538
	hci_dev_lock(hdev);
2539

2540
	if (!hdev_is_powered(hdev)) {
2541 2542
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
2543 2544 2545
		goto failed;
	}

2546
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2547
	if (!conn) {
2548 2549
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
2550 2551 2552 2553
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2554 2555 2556
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2557 2558 2559

		BT_ERR("PIN code is not 16 bytes long");

2560
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2561
		if (err >= 0)
2562 2563
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);
2564 2565 2566 2567

		goto failed;
	}

2568
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2569 2570
	if (!cmd) {
		err = -ENOMEM;
2571
		goto failed;
2572
	}
2573

2574 2575
	cmd->cmd_complete = addr_cmd_complete;

2576
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2577
	reply.pin_len = cp->pin_len;
2578
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2579 2580 2581

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
2582
		mgmt_pending_remove(cmd);
2583 2584

failed:
2585
	hci_dev_unlock(hdev);
2586 2587 2588
	return err;
}

2589 2590
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
2591
{
2592
	struct mgmt_cp_set_io_capability *cp = data;
2593 2594 2595

	BT_DBG("");

2596
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2597 2598
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);
2599

2600
	hci_dev_lock(hdev);
2601 2602 2603 2604

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2605
	       hdev->io_capability);
2606

2607
	hci_dev_unlock(hdev);
2608

2609 2610
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
2611 2612
}

2613
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2614 2615
{
	struct hci_dev *hdev = conn->hdev;
2616
	struct mgmt_pending_cmd *cmd;
2617

2618
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

2631
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2632 2633 2634
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
2635
	int err;
2636

2637 2638
	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2639

2640 2641
	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));
2642 2643 2644 2645 2646 2647

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

2648
	hci_conn_drop(conn);
2649 2650 2651 2652 2653

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2654 2655

	hci_conn_put(conn);
2656 2657

	return err;
2658 2659
}

2660 2661 2662
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2663
	struct mgmt_pending_cmd *cmd;
2664 2665

	cmd = find_pairing(conn);
2666
	if (cmd) {
2667
		cmd->cmd_complete(cmd, status);
2668 2669
		mgmt_pending_remove(cmd);
	}
2670 2671
}

2672 2673
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
2674
	struct mgmt_pending_cmd *cmd;
2675 2676 2677 2678

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
2679
	if (!cmd) {
2680
		BT_DBG("Unable to find a pending command");
2681 2682 2683 2684 2685
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
2686 2687
}

2688
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2689
{
2690
	struct mgmt_pending_cmd *cmd;
2691 2692 2693 2694 2695 2696 2697

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
2698
	if (!cmd) {
2699
		BT_DBG("Unable to find a pending command");
2700 2701 2702 2703 2704
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
2705 2706
}

2707
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2708
		       u16 len)
2709
{
2710
	struct mgmt_cp_pair_device *cp = data;
2711
	struct mgmt_rp_pair_device rp;
2712
	struct mgmt_pending_cmd *cmd;
2713 2714 2715 2716 2717 2718
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

2719 2720 2721 2722
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2723
	if (!bdaddr_type_is_valid(cp->addr.type))
2724 2725 2726
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2727

2728
	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2729 2730 2731
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2732

2733
	hci_dev_lock(hdev);
2734

2735
	if (!hdev_is_powered(hdev)) {
2736 2737 2738
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2739 2740 2741
		goto unlock;
	}

2742 2743 2744 2745 2746 2747 2748
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

2749
	sec_level = BT_SECURITY_MEDIUM;
2750
	auth_type = HCI_AT_DEDICATED_BONDING;
2751

2752
	if (cp->addr.type == BDADDR_BREDR) {
2753 2754
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
2755
	} else {
2756
		u8 addr_type = le_addr_type(cp->addr.type);
2757
		struct hci_conn_params *p;
2758

2759 2760 2761 2762 2763 2764 2765 2766 2767
		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
2768 2769 2770 2771
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2772

2773 2774
		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
2775
					   HCI_LE_CONN_TIMEOUT);
2776
	}
2777

2778
	if (IS_ERR(conn)) {
2779 2780 2781 2782
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
2783 2784 2785 2786
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
2787 2788 2789
		else
			status = MGMT_STATUS_CONNECT_FAILED;

2790 2791
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
2792 2793 2794 2795
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
2796
		hci_conn_drop(conn);
2797 2798
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2799 2800 2801
		goto unlock;
	}

2802
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2803 2804
	if (!cmd) {
		err = -ENOMEM;
2805
		hci_conn_drop(conn);
2806 2807 2808
		goto unlock;
	}

2809 2810
	cmd->cmd_complete = pairing_complete;

2811
	/* For LE, just connecting isn't a proof that the pairing finished */
2812
	if (cp->addr.type == BDADDR_BREDR) {
2813
		conn->connect_cfm_cb = pairing_complete_cb;
2814 2815 2816 2817 2818 2819 2820
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}
2821

2822
	conn->io_capability = cp->io_cap;
2823
	cmd->user_data = hci_conn_get(conn);
2824

2825
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2826 2827 2828 2829
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}
2830 2831 2832 2833

	err = 0;

unlock:
2834
	hci_dev_unlock(hdev);
2835 2836 2837
	return err;
}

2838 2839
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2840
{
2841
	struct mgmt_addr_info *addr = data;
2842
	struct mgmt_pending_cmd *cmd;
2843 2844 2845 2846 2847 2848 2849
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

2850
	if (!hdev_is_powered(hdev)) {
2851 2852
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
2853 2854 2855
		goto unlock;
	}

2856
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2857
	if (!cmd) {
2858 2859
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
2860 2861 2862 2863 2864 2865
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2866 2867
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
2868 2869 2870
		goto unlock;
	}

2871 2872
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);
2873

2874 2875
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
2876 2877 2878 2879 2880
unlock:
	hci_dev_unlock(hdev);
	return err;
}

2881
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2882
			     struct mgmt_addr_info *addr, u16 mgmt_op,
2883
			     u16 hci_op, __le32 passkey)
2884
{
2885
	struct mgmt_pending_cmd *cmd;
2886
	struct hci_conn *conn;
2887 2888
	int err;

2889
	hci_dev_lock(hdev);
2890

2891
	if (!hdev_is_powered(hdev)) {
2892 2893 2894
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
2895
		goto done;
2896 2897
	}

2898 2899
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2900
	else
2901 2902
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));
2903 2904

	if (!conn) {
2905 2906 2907
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
2908 2909
		goto done;
	}
2910

2911
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2912 2913
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
2914 2915 2916
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
2917
		else
2918 2919 2920
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));
2921 2922 2923 2924

		goto done;
	}

2925
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2926 2927
	if (!cmd) {
		err = -ENOMEM;
2928
		goto done;
2929 2930
	}

2931 2932
	cmd->cmd_complete = addr_cmd_complete;

2933
	/* Continue with pairing via HCI */
2934 2935 2936
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

2937
		bacpy(&cp.bdaddr, &addr->bdaddr);
2938 2939 2940
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
2941 2942
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);
2943

2944 2945
	if (err < 0)
		mgmt_pending_remove(cmd);
2946

2947
done:
2948
	hci_dev_unlock(hdev);
2949 2950 2951
	return err;
}

2952 2953 2954 2955 2956 2957 2958
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

2959
	return user_pairing_resp(sk, hdev, &cp->addr,
2960 2961 2962 2963
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

2964 2965
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2966
{
2967
	struct mgmt_cp_user_confirm_reply *cp = data;
2968 2969 2970 2971

	BT_DBG("");

	if (len != sizeof(*cp))
2972 2973
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);
2974

2975
	return user_pairing_resp(sk, hdev, &cp->addr,
2976 2977
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
2978 2979
}

2980
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2981
				  void *data, u16 len)
2982
{
2983
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2984 2985 2986

	BT_DBG("");

2987
	return user_pairing_resp(sk, hdev, &cp->addr,
2988 2989
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2990 2991
}

2992 2993
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2994
{
2995
	struct mgmt_cp_user_passkey_reply *cp = data;
2996 2997 2998

	BT_DBG("");

2999
	return user_pairing_resp(sk, hdev, &cp->addr,
3000 3001
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3002 3003
}

3004
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3005
				  void *data, u16 len)
3006
{
3007
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3008 3009 3010

	BT_DBG("");

3011
	return user_pairing_resp(sk, hdev, &cp->addr,
3012 3013
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3014 3015
}

3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}

3045
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3046 3047
{
	struct mgmt_cp_set_local_name *cp;
3048
	struct mgmt_pending_cmd *cmd;
3049 3050 3051 3052 3053

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

3054
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3055 3056 3057 3058 3059
	if (!cmd)
		goto unlock;

	cp = cmd->param;

3060
	if (status) {
3061 3062
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
3063
	} else {
3064 3065
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));
3066

3067 3068 3069 3070
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

3071 3072 3073 3074 3075 3076
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3077
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3078
			  u16 len)
3079
{
3080
	struct mgmt_cp_set_local_name *cp = data;
3081
	struct mgmt_pending_cmd *cmd;
3082
	struct hci_request req;
3083 3084 3085 3086
	int err;

	BT_DBG("");

3087
	hci_dev_lock(hdev);
3088

3089 3090 3091 3092 3093 3094
	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
3095 3096
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3097 3098 3099
		goto failed;
	}

3100
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3101

3102
	if (!hdev_is_powered(hdev)) {
3103
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3104

3105 3106
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3107 3108 3109
		if (err < 0)
			goto failed;

3110 3111
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3112
		ext_info_changed(hdev, sk);
3113

3114 3115 3116
		goto failed;
	}

3117
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3118 3119 3120 3121 3122
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3123 3124
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

3125
	hci_req_init(&req, hdev);
3126 3127

	if (lmp_bredr_capable(hdev)) {
3128
		__hci_req_update_name(&req);
3129
		__hci_req_update_eir(&req);
3130 3131
	}

3132 3133 3134
	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
3135
	if (lmp_le_capable(hdev))
3136
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3137

3138
	err = hci_req_run(&req, set_name_complete);
3139 3140 3141 3142
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
3143
	hci_dev_unlock(hdev);
3144 3145 3146
	return err;
}

3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 apperance;
	int err;

	BT_DBG("");

	apperance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != apperance) {
		hdev->appearance = apperance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}

3234
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3235
			       void *data, u16 data_len)
3236
{
3237
	struct mgmt_pending_cmd *cmd;
3238
	struct hci_request req;
3239 3240
	int err;

3241
	BT_DBG("%s", hdev->name);
3242

3243
	hci_dev_lock(hdev);
3244

3245
	if (!hdev_is_powered(hdev)) {
3246 3247
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
3248 3249 3250
		goto unlock;
	}

3251
	if (!lmp_ssp_capable(hdev)) {
3252 3253
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
3254 3255 3256
		goto unlock;
	}

3257
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3258 3259
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
3260 3261 3262
		goto unlock;
	}

3263
	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3264 3265 3266 3267 3268
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

3269 3270
	hci_req_init(&req, hdev);

3271
	if (bredr_sc_enabled(hdev))
3272
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3273
	else
3274
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3275

3276
	err = hci_req_run_skb(&req, read_local_oob_data_complete);
3277 3278 3279 3280
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
3281
	hci_dev_unlock(hdev);
3282 3283 3284
	return err;
}

3285
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3286
			       void *data, u16 len)
3287
{
3288
	struct mgmt_addr_info *addr = data;
3289 3290
	int err;

3291
	BT_DBG("%s ", hdev->name);
3292

3293
	if (!bdaddr_type_is_valid(addr->type))
3294 3295 3296 3297
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));
3298

3299
	hci_dev_lock(hdev);
3300

3301 3302 3303
	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;
3304

3305
		if (cp->addr.type != BDADDR_BREDR) {
3306 3307 3308 3309
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
3310 3311 3312
			goto unlock;
		}

3313
		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3314 3315
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
3316 3317 3318 3319 3320
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

3321 3322 3323
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
3324 3325
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3326
		u8 *rand192, *hash192, *rand256, *hash256;
3327 3328
		u8 status;

3329
		if (bdaddr_type_is_le(cp->addr.type)) {
3330 3331 3332 3333 3334
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
3335 3336 3337 3338
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
3339 3340 3341
				goto unlock;
			}

3342 3343 3344
			rand192 = NULL;
			hash192 = NULL;
		} else {
3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
3368 3369
		}

3370
		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3371
					      cp->addr.type, hash192, rand192,
3372
					      hash256, rand256);
3373 3374 3375 3376 3377
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

3378 3379 3380
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
3381 3382
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3383 3384
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
3385
	}
3386

3387
unlock:
3388
	hci_dev_unlock(hdev);
3389 3390 3391
	return err;
}

3392
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3393
				  void *data, u16 len)
3394
{
3395
	struct mgmt_cp_remove_remote_oob_data *cp = data;
3396
	u8 status;
3397 3398
	int err;

3399
	BT_DBG("%s", hdev->name);
3400

3401
	if (cp->addr.type != BDADDR_BREDR)
3402 3403 3404 3405
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
3406

3407
	hci_dev_lock(hdev);
3408

3409 3410 3411 3412 3413 3414
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

3415
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3416
	if (err < 0)
3417
		status = MGMT_STATUS_INVALID_PARAMS;
3418
	else
3419
		status = MGMT_STATUS_SUCCESS;
3420

3421
done:
3422 3423
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));
3424

3425
	hci_dev_unlock(hdev);
3426 3427 3428
	return err;
}

3429
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3430
{
3431
	struct mgmt_pending_cmd *cmd;
3432

3433 3434
	BT_DBG("status %d", status);

3435
	hci_dev_lock(hdev);
3436

3437
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3438
	if (!cmd)
3439
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3440

3441 3442 3443
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

3444
	if (cmd) {
3445
		cmd->cmd_complete(cmd, mgmt_status(status));
3446 3447
		mgmt_pending_remove(cmd);
	}
3448

3449
	hci_dev_unlock(hdev);
3450 3451
}

3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		/* Intentional fall-through */
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

3479 3480
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
3481
{
3482
	struct mgmt_cp_start_discovery *cp = data;
3483
	struct mgmt_pending_cmd *cmd;
3484
	u8 status;
3485 3486
	int err;

3487
	BT_DBG("%s", hdev->name);
3488

3489
	hci_dev_lock(hdev);
3490

3491
	if (!hdev_is_powered(hdev)) {
3492
		err = mgmt_cmd_complete(sk, hdev->id, op,
3493 3494
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
3495 3496 3497
		goto failed;
	}

3498
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3499
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3500 3501
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
3502 3503 3504
		goto failed;
	}

3505
	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3506 3507
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
3508 3509 3510
		goto failed;
	}

3511 3512 3513 3514 3515
	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

A
Andre Guedes 已提交
3516
	hdev->discovery.type = cp->type;
3517
	hdev->discovery.report_invalid_rssi = false;
3518 3519 3520 3521
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;
A
Andre Guedes 已提交
3522

3523
	cmd = mgmt_pending_add(sk, op, hdev, data, len);
3524 3525
	if (!cmd) {
		err = -ENOMEM;
3526
		goto failed;
3527
	}
3528

3529
	cmd->cmd_complete = generic_cmd_complete;
3530

3531
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3532 3533
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;
3534

3535
failed:
3536
	hci_dev_unlock(hdev);
3537 3538
	return err;
}
3539

3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

/* Handler for MGMT_OP_START_LIMITED_DISCOVERY: delegates to
 * start_discovery_internal(), which sets hdev->discovery.limited
 * based on this opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

3555 3556
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
3557
{
3558 3559
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
3560
}
3561

3562 3563 3564 3565
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
3566
	struct mgmt_pending_cmd *cmd;
3567 3568 3569 3570
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;
3571

3572
	BT_DBG("%s", hdev->name);
3573

3574
	hci_dev_lock(hdev);
3575

3576
	if (!hdev_is_powered(hdev)) {
3577 3578 3579 3580
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
3581 3582
		goto failed;
	}
3583

3584
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3585
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3586 3587 3588 3589
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
3590 3591
		goto failed;
	}
3592

3593 3594 3595 3596
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
3597 3598 3599 3600
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
3601 3602 3603 3604 3605 3606 3607
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
3608 3609 3610 3611
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
3612 3613 3614
		goto failed;
	}

3615 3616 3617 3618 3619 3620 3621
	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

3622
	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3623
			       hdev, data, len);
3624 3625 3626 3627 3628
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3629 3630
	cmd->cmd_complete = service_discovery_cmd_complete;

3631 3632 3633 3634 3635
	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

3636
	hdev->discovery.result_filtering = true;
3637 3638 3639 3640 3641 3642 3643 3644
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
3645 3646 3647 3648
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
3649 3650 3651
			mgmt_pending_remove(cmd);
			goto failed;
		}
3652
	}
3653

3654
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3655 3656
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;
3657 3658

failed:
3659
	hci_dev_unlock(hdev);
3660 3661 3662
	return err;
}

3663
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
3664
{
3665
	struct mgmt_pending_cmd *cmd;
3666

3667 3668 3669 3670
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

3671
	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3672
	if (cmd) {
3673
		cmd->cmd_complete(cmd, mgmt_status(status));
3674
		mgmt_pending_remove(cmd);
3675 3676 3677 3678 3679
	}

	hci_dev_unlock(hdev);
}

3680
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3681
			  u16 len)
3682
{
3683
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
3684
	struct mgmt_pending_cmd *cmd;
3685 3686
	int err;

3687
	BT_DBG("%s", hdev->name);
3688

3689
	hci_dev_lock(hdev);
3690

3691
	if (!hci_discovery_active(hdev)) {
3692 3693 3694
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
3695 3696 3697 3698
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
3699 3700 3701
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
3702
		goto unlock;
3703 3704
	}

3705
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
3706 3707
	if (!cmd) {
		err = -ENOMEM;
3708 3709 3710
		goto unlock;
	}

3711 3712
	cmd->cmd_complete = generic_cmd_complete;

3713 3714 3715
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;
3716

3717
unlock:
3718
	hci_dev_unlock(hdev);
3719 3720 3721
	return err;
}

3722
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3723
			u16 len)
3724
{
3725
	struct mgmt_cp_confirm_name *cp = data;
3726 3727 3728
	struct inquiry_entry *e;
	int err;

3729
	BT_DBG("%s", hdev->name);
3730 3731 3732

	hci_dev_lock(hdev);

3733
	if (!hci_discovery_active(hdev)) {
3734 3735 3736
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
3737 3738 3739
		goto failed;
	}

3740
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3741
	if (!e) {
3742 3743 3744
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
3745 3746 3747 3748 3749 3750 3751 3752
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
3753
		hci_inquiry_cache_update_resolve(hdev, e);
3754 3755
	}

3756 3757
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
3758 3759 3760 3761 3762 3763

failed:
	hci_dev_unlock(hdev);
	return err;
}

3764
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3765
			u16 len)
3766
{
3767
	struct mgmt_cp_block_device *cp = data;
3768
	u8 status;
3769 3770
	int err;

3771
	BT_DBG("%s", hdev->name);
3772

3773
	if (!bdaddr_type_is_valid(cp->addr.type))
3774 3775 3776
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
3777

3778
	hci_dev_lock(hdev);
3779

3780 3781
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
3782
	if (err < 0) {
3783
		status = MGMT_STATUS_FAILED;
3784 3785 3786 3787 3788 3789
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
3790

3791
done:
3792 3793
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
3794

3795
	hci_dev_unlock(hdev);
3796 3797 3798 3799

	return err;
}

3800
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3801
			  u16 len)
3802
{
3803
	struct mgmt_cp_unblock_device *cp = data;
3804
	u8 status;
3805 3806
	int err;

3807
	BT_DBG("%s", hdev->name);
3808

3809
	if (!bdaddr_type_is_valid(cp->addr.type))
3810 3811 3812
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
3813

3814
	hci_dev_lock(hdev);
3815

3816 3817
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
3818
	if (err < 0) {
3819
		status = MGMT_STATUS_INVALID_PARAMS;
3820 3821 3822 3823 3824 3825
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
3826

3827
done:
3828 3829
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
3830

3831
	hci_dev_unlock(hdev);
3832 3833 3834 3835

	return err;
}

3836 3837 3838 3839
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
3840
	struct hci_request req;
3841
	int err;
3842
	__u16 source;
3843 3844 3845

	BT_DBG("%s", hdev->name);

3846 3847 3848
	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
3849 3850
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);
3851

3852 3853
	hci_dev_lock(hdev);

3854
	hdev->devid_source = source;
3855 3856 3857 3858
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

3859 3860
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);
3861

3862
	hci_req_init(&req, hdev);
3863
	__hci_req_update_eir(&req);
3864
	hci_req_run(&req, NULL);
3865 3866 3867 3868 3869 3870

	hci_dev_unlock(hdev);

	return err;
}

3871 3872 3873 3874 3875 3876
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}

3877 3878
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
3879 3880
{
	struct cmd_lookup match = { NULL, hdev };
3881
	struct hci_request req;
3882 3883 3884
	u8 instance;
	struct adv_info *adv_instance;
	int err;
3885

3886 3887
	hci_dev_lock(hdev);

3888 3889 3890 3891 3892
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
3893
		goto unlock;
3894 3895
	}

3896
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3897
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
3898
	else
3899
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3900

3901 3902 3903 3904 3905 3906 3907
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
3908

3909
	/* If "Set Advertising" was just disabled and instance advertising was
3910
	 * set up earlier, then re-enable multi-instance advertising.
3911 3912
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3913
	    list_empty(&hdev->adv_instances))
3914 3915
		goto unlock;

3916 3917 3918 3919 3920 3921 3922 3923 3924 3925
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

3926 3927
	hci_req_init(&req, hdev);

3928
	err = __hci_req_schedule_adv_instance(&req, instance, true);
3929 3930 3931

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);
3932

3933
	if (err)
3934 3935
		BT_ERR("Failed to re-configure advertising");

3936 3937
unlock:
	hci_dev_unlock(hdev);
3938 3939
}

3940 3941
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
3942 3943
{
	struct mgmt_mode *cp = data;
3944
	struct mgmt_pending_cmd *cmd;
3945
	struct hci_request req;
3946
	u8 val, status;
3947 3948 3949 3950
	int err;

	BT_DBG("request for %s", hdev->name);

3951 3952
	status = mgmt_le_support(hdev);
	if (status)
3953 3954
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);
3955

3956
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
3957 3958
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);
3959 3960 3961 3962 3963

	hci_dev_lock(hdev);

	val = !!cp->val;

3964 3965 3966 3967 3968
	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
3969
	if (!hdev_is_powered(hdev) ||
3970 3971
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
3972
	    hci_conn_num(hdev, LE_LINK) > 0 ||
3973
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
3974
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
3975
		bool changed;
3976

3977
		if (cp->val) {
3978
			hdev->cur_adv_instance = 0x00;
3979
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
3980
			if (cp->val == 0x02)
3981
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3982
			else
3983
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3984
		} else {
3985
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
3986
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

3999 4000
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
4001 4002
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

4014
	if (cp->val == 0x02)
4015
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4016
	else
4017
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4018

4019 4020
	cancel_adv_timeout(hdev);

4021
	if (val) {
4022 4023 4024 4025
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
4026
		hdev->cur_adv_instance = 0x00;
4027 4028 4029
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
4030
	} else {
4031
		__hci_req_disable_advertising(&req);
4032
	}
4033 4034 4035 4036 4037 4038 4039 4040 4041 4042

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4043 4044 4045 4046 4047 4048 4049 4050
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

4051
	if (!lmp_le_capable(hdev))
4052 4053
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
4054 4055

	if (hdev_is_powered(hdev))
4056 4057
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
4058 4059 4060

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4061 4062 4063
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
4064 4065 4066

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4067 4068 4069
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
4070 4071 4072 4073 4074 4075
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

4076 4077 4078 4079 4080
	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);
4081

4082
unlock:
4083 4084 4085 4086
	hci_dev_unlock(hdev);
	return err;
}

4087 4088 4089 4090 4091 4092 4093 4094 4095 4096
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
4097 4098
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);
4099 4100 4101 4102

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
4103 4104
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4105 4106 4107 4108

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
4109 4110
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4111

4112
	if (window > interval)
4113 4114
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4115

4116 4117 4118 4119 4120
	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

4121 4122
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);
4123

4124 4125 4126
	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
4127
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

4139 4140 4141 4142 4143
	hci_dev_unlock(hdev);

	return err;
}

4144 4145
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
4146
{
4147
	struct mgmt_pending_cmd *cmd;
4148 4149 4150 4151 4152

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

4153
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4154 4155 4156 4157
	if (!cmd)
		goto unlock;

	if (status) {
4158 4159
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
4160
	} else {
4161 4162 4163
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
4164
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4165
		else
4166
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4167

4168 4169 4170 4171 4172 4173 4174 4175 4176 4177
		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

4178
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4179
				void *data, u16 len)
4180
{
4181
	struct mgmt_mode *cp = data;
4182
	struct mgmt_pending_cmd *cmd;
4183
	struct hci_request req;
4184 4185
	int err;

4186
	BT_DBG("%s", hdev->name);
4187

4188
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4189
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4190 4191
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);
4192

4193
	if (cp->val != 0x00 && cp->val != 0x01)
4194 4195
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
4196

4197 4198
	hci_dev_lock(hdev);

4199
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4200 4201
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
4202 4203 4204
		goto unlock;
	}

4205
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4206 4207 4208 4209 4210
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

4211
	if (!hdev_is_powered(hdev)) {
4212
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4213 4214 4215 4216 4217 4218
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

4219 4220 4221 4222 4223
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
4224 4225
	}

4226 4227
	hci_req_init(&req, hdev);

4228
	__hci_req_write_fast_connectable(&req, cp->val);
4229 4230

	err = hci_req_run(&req, fast_connectable_complete);
4231
	if (err < 0) {
4232 4233
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
4234
		mgmt_pending_remove(cmd);
4235 4236
	}

4237
unlock:
4238
	hci_dev_unlock(hdev);
4239

4240 4241 4242
	return err;
}

4243
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4244
{
4245
	struct mgmt_pending_cmd *cmd;
4246 4247 4248 4249 4250

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

4251
	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4252 4253 4254 4255 4256 4257 4258 4259 4260
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
4261
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4262

4263
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE capable) controller.
 *
 * While powered off only the setting flag is toggled. While powered on,
 * disabling is rejected and enabling is performed through an HCI request
 * that completes in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op request: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4387 4388
/* Completion callback for the Write Secure Connections Support request
 * issued by set_secure_conn(). On success the HCI_SC_ENABLED/HCI_SC_ONLY
 * flags are set according to the originally requested mode (0x00 off,
 * 0x01 enabled, 0x02 SC-only) and the pending command is answered.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command's param */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

4432 4433 4434 4435
/* Handler for MGMT_OP_SET_SECURE_CONN: val is 0x00 (off), 0x01 (enabled)
 * or 0x02 (secure connections only).
 *
 * When the controller is powered off, not SC capable or BR/EDR is
 * disabled, only host flags are toggled; otherwise the setting is
 * written to the controller and completed in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR up on an SC capable controller, SSP must be on first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flags-only path: nothing needs to be sent to the controller */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just reply with settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

4520 4521 4522 4523
/* Handler for MGMT_OP_SET_DEBUG_KEYS: val is 0x00 (off), 0x01 (keep
 * debug keys) or 0x02 (keep and actively use debug keys). When the
 * "use" state changes on a powered controller with SSP enabled, the
 * SSP debug mode is also updated on the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both keep debug keys around */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 means debug keys are actively used */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4567 4568 4569 4570 4571 4572 4573 4574 4575 4576
/* Handler for MGMT_OP_SET_PRIVACY: privacy is 0x00 (off), 0x01 (on) or
 * 0x02 (limited privacy). Also installs or clears the local IRK. Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641
/* Validate the address in an IRK entry: public LE addresses are always
 * acceptable; random LE addresses must have the two most significant
 * bits set. Any other address type is rejected.
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	if (irk->addr.type == BDADDR_LE_PUBLIC)
		return true;

	if (irk->addr.type == BDADDR_LE_RANDOM)
		return (irk->addr.bdaddr.b[5] & 0xc0) == 0xc0;

	return false;
}

/* Handler for MGMT_OP_LOAD_IRKS: replace all stored identity resolving
 * keys with the list supplied by user space. The whole list is validated
 * before the existing keys are cleared, so an invalid entry leaves the
 * stored keys untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space supplying IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

4700 4701 4702 4703
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
4717 4718
}

4719
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace all stored LE long
 * term keys with the list supplied by user space. The list is fully
 * validated before the existing keys are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): falls through to "continue", so debug
			 * keys (and unknown types) are never stored —
			 * presumably intentional; confirm before adding a
			 * break here.
			 */
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

4806
/* cmd_complete handler for pending MGMT_OP_GET_CONN_INFO commands.
 * Builds the reply from the values cached in the hci_conn (or invalid
 * markers on failure) and releases the connection references taken when
 * the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The reply echoes the address from the original request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

4833 4834
/* Completion callback for the Read RSSI / Read TX Power request issued
 * by get_conn_info(). Resolves the connection from the last sent command
 * and completes the matching pending MGMT_OP_GET_CONN_INFO command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command was registered with the conn as its data */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection. Replies immediately from cached values when they
 * are fresh enough; otherwise issues Read RSSI / Read TX Power commands
 * and completes asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding query per connection is allowed */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5007
/* cmd_complete handler for pending MGMT_OP_GET_CLOCK_INFO commands.
 * Builds the reply from the cached clock values and releases the
 * connection references taken in get_clock_info() (if any).
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The reply echoes the address from the original request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure only the zeroed address reply is sent */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is NULL when only the local clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}

5043
/* Completion callback for the Read Clock request issued by
 * get_clock_info(). Resolves the connection (for piconet clock reads)
 * and completes the matching pending MGMT_OP_GET_CLOCK_INFO command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * BR/EDR connection address is given, the piconet clock of that
 * connection. Completes asynchronously via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a connection for the piconet clock */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00: local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168
/* Return true if an LE link to @addr with the given destination address
 * type exists and is in BT_CONNECTED state.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	return conn && conn->dst_type == type && conn->state == BT_CONNECTED;
}

/* This function requires the caller holds hdev->lock */

/* Create (or look up) the connection parameters for @addr/@addr_type and
 * update their auto_connect mode, moving the entry to the matching
 * pending-connection or pending-report list. Returns 0 on success or
 * -EIO when the parameter entry cannot be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224
/* Emit an MGMT_EV_DEVICE_ADDED event for the given device entry. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5225 5226 5227 5228 5229 5230 5231 5232 5233
/* Handler for MGMT_OP_ADD_DEVICE: action is 0x00 (background scan and
 * report), 0x01 (allow incoming connection / direct connect) or 0x02
 * (auto-connect). BR/EDR addresses go on the whitelist; LE identity
 * addresses get connection parameters via hci_conn_params_set().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323
/* Emit an MGMT_EV_DEVICE_REMOVED event for the given device entry. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5335
		struct hci_conn_params *params;
5336 5337
		u8 addr_type;

5338
		if (!bdaddr_type_is_valid(cp->addr.type)) {
5339 5340 5341 5342
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
5343 5344 5345
			goto unlock;
		}

5346 5347 5348 5349 5350
		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
5351 5352 5353 5354 5355
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
5356 5357 5358
				goto unlock;
			}

5359
			hci_req_update_scan(hdev);
5360

5361 5362 5363 5364 5365
			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

5366
		addr_type = le_addr_type(cp->addr.type);
5367

5368 5369 5370 5371 5372 5373
		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5374 5375 5376 5377
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
5378 5379 5380
			goto unlock;
		}

5381 5382 5383
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
5384 5385 5386 5387
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
5388 5389 5390
			goto unlock;
		}

5391 5392
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5393 5394 5395 5396
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
5397 5398 5399
			goto unlock;
		}

5400
		list_del(&params->action);
5401 5402
		list_del(&params->list);
		kfree(params);
5403
		hci_update_background_scan(hdev);
5404 5405

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5406
	} else {
5407
		struct hci_conn_params *p, *tmp;
5408
		struct bdaddr_list *b, *btmp;
5409

5410
		if (cp->addr.type) {
5411 5412 5413 5414
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
5415 5416 5417
			goto unlock;
		}

5418 5419 5420 5421 5422 5423
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

5424
		hci_req_update_scan(hdev);
5425

5426 5427 5428 5429
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
5430 5431 5432 5433
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
5434 5435 5436 5437 5438 5439 5440
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

5441
		hci_update_background_scan(hdev);
5442 5443
	}

5444
complete:
5445 5446 5447
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
5448 5449 5450 5451 5452
unlock:
	hci_dev_unlock(hdev);
	return err;
}

5453 5454 5455 5456
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
5457 5458
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
5459 5460 5461 5462
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
5463 5464
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);
5465 5466

	param_count = __le16_to_cpu(cp->param_count);
5467 5468 5469
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
5470 5471
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
5472
	}
5473 5474 5475 5476 5477 5478

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
5479 5480
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

5535 5536
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
5537 5538
}

5539 5540 5541 5542 5543 5544 5545 5546 5547 5548
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
5549 5550
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);
5551 5552

	if (cp->config != 0x00 && cp->config != 0x01)
5553 5554
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);
5555 5556

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5557 5558
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);
5559 5560 5561 5562

	hci_dev_lock(hdev);

	if (cp->config)
5563
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5564
	else
5565
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5566 5567 5568 5569 5570 5571 5572 5573

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

5574 5575
	err = new_options(hdev, sk);

5576
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5577
		mgmt_index_removed(hdev);
5578

5579
		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5580 5581
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5582 5583 5584

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
5585
			set_bit(HCI_RAW, &hdev->flags);
5586 5587
			mgmt_index_added(hdev);
		}
5588 5589 5590 5591 5592 5593 5594
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5595 5596 5597 5598 5599 5600 5601 5602 5603 5604
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
5605 5606
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
5607 5608

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5609 5610
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);
5611 5612

	if (!hdev->set_bdaddr)
5613 5614
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

5628
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5629 5630 5631 5632 5633
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

5634
		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
5635

5636 5637
		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5638 5639 5640 5641 5642 5643 5644 5645 5646

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* Completion handler for the HCI OOB data request issued by
 * read_local_ssp_oob_req().
 *
 * Depending on which HCI command completed, extracts the P-192 and/or
 * P-256 hash and randomizer values, packages them as EIR fields
 * together with the class of device, answers the pending
 * READ_LOCAL_OOB_EXT_DATA command and, on success, emits a Local OOB
 * Data Updated event to sockets that opted into those events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	/* Nothing to do if the command is no longer pending. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: respond with no EIR data at all. */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 values are available. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-dev field + two 18-byte
			 * (2-byte header + 16-byte value) EIR fields.
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 values, plus P-192 unless the
		 * controller is restricted to Secure Connections Only.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure skip EIR construction but still send the response. */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

5786 5787 5788 5789 5790 5791 5792
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
5793
	u8 status, flags, role, addr[7], hash[16], rand[16];
5794 5795 5796 5797
	int err;

	BT_DBG("%s", hdev->name);

5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
5822 5823 5824 5825
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
5826
	if (!rp)
5827
		return -ENOMEM;
5828

5829 5830 5831
	if (status)
		goto complete;

5832
	hci_dev_lock(hdev);
5833 5834 5835 5836

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
5850 5851
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
5852 5853
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
5854
			hci_dev_unlock(hdev);
5855 5856
			status = MGMT_STATUS_FAILED;
			goto complete;
5857 5858
		}

5859 5860 5861 5862 5863 5864 5865 5866 5867 5868
		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
5869
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5870 5871 5872 5873 5874 5875 5876 5877 5878
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

5897 5898 5899 5900
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));
5901

5902 5903 5904 5905
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}
5906

5907
		flags = mgmt_get_adv_discov_flags(hdev);
5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

5919 5920
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

5921 5922 5923
	status = MGMT_STATUS_SUCCESS;

complete:
5924 5925 5926
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

5927
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
5928 5929
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
5930 5931 5932 5933 5934
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);
5935

5936
done:
5937 5938 5939 5940 5941
	kfree(rp);

	return err;
}

5942 5943 5944 5945 5946 5947 5948 5949
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5950
	flags |= MGMT_ADV_FLAG_APPEARANCE;
5951
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
5952 5953 5954 5955 5956 5957 5958

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
		flags |= MGMT_ADV_FLAG_TX_POWER;

	return flags;
}

5959 5960 5961 5962 5963
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
5964
	int err;
5965
	struct adv_info *adv_instance;
5966
	u32 supported_flags;
5967
	u8 *instance;
5968 5969 5970

	BT_DBG("%s", hdev->name);

5971 5972 5973 5974
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

5975 5976
	hci_dev_lock(hdev);

5977
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
5978 5979 5980 5981 5982 5983
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

5984 5985 5986
	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
5987 5988
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
5989
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
5990
	rp->num_instances = hdev->adv_instance_cnt;
5991

5992 5993 5994 5995
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
5996
	}
5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

6008
static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
6009
{
6010
	u8 max_len = HCI_MAX_AD_LENGTH;
6011
	int i, cur_len;
6012
	bool flags_managed = false;
6013
	bool tx_power_managed = false;
6014

6015 6016 6017 6018 6019 6020 6021
	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
			flags_managed = true;
			max_len -= 3;
		}
6022

6023 6024 6025 6026
		if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
			tx_power_managed = true;
			max_len -= 3;
		}
6027 6028 6029 6030
	} else {
		/* at least 1 byte of name should fit in */
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= 3;
6031 6032 6033

		if (adv_flags & MGMT_ADV_FLAG_APPEARANCE)
			max_len -= 4;
6034 6035
	}

6036
	if (len > max_len)
6037 6038
		return false;

6039 6040 6041
	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];
6042

6043 6044 6045
		if (flags_managed && data[i + 1] == EIR_FLAGS)
			return false;

6046 6047 6048
		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
			return false;

6049 6050 6051
		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
6052
		if (i + cur_len >= len)
6053 6054 6055 6056 6057 6058 6059 6060 6061 6062
			return false;
	}

	return true;
}

/* Completion callback for the HCI request issued by add_advertising().
 *
 * On success, pending advertising instances are committed.  On failure,
 * each still-pending instance is removed (cancelling the advertising
 * timeout if it was the current instance) and its removal broadcast.
 * Finally the pending management command, if any, is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Add Advertising command.
 *
 * Validates the instance number, payload length, flags and TLV data,
 * registers (or replaces) the advertising instance, and schedules it
 * for advertising when the controller state allows.  When HCI commands
 * are needed, the command completes asynchronously through
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while the controller is off. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

6252 6253 6254 6255
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
6256
	struct mgmt_cp_remove_advertising *cp;
6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

6271 6272
	cp = cmd->param;
	rp.instance = cp->instance;
6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Remove Advertising command.
 *
 * Clears the requested advertising instance (a non-zero instance must
 * exist; instance 0 is passed straight to hci_req_clear_adv_instance)
 * and disables advertising when no instances remain.  Completes
 * synchronously when no HCI commands are needed, otherwise through
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365
static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
6366 6367 6368 6369
	} else {
		/* at least 1 byte of name should fit in */
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= 3;
6370 6371 6372

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416
	}

	return max_len;
}

/* Handle the Get Advertising Size Information mgmt command: report the
 * maximum advertising data and scan response lengths available for the
 * requested instance and flag combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 adv_flags, supported;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	adv_flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported = get_supported_adv_flags(hdev);
	if (adv_flags & ~supported)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(adv_flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(adv_flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

6417
static const struct hci_mgmt_handler mgmt_handlers[] = {
6418
	{ NULL }, /* 0x0000 (no command) */
6419
	{ read_version,            MGMT_READ_VERSION_SIZE,
6420 6421
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
6422
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
6423 6424
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
6425
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
6426 6427 6428 6429
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
6443 6444 6445 6446
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6459 6460 6461
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
6476 6477
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
6478 6479 6480 6481
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
6482 6483 6484
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6485 6486
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
6487
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
6488 6489
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
6490 6491 6492 6493 6494 6495
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
6496
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6497
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
6498 6499
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
6500
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
6501 6502
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
6503
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
6504
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
6505
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
6506 6507
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
6508
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
6509 6510
};

6511
void mgmt_index_added(struct hci_dev *hdev)
6512
{
6513
	struct mgmt_ev_ext_index ev;
6514

6515 6516 6517
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6518
	switch (hdev->dev_type) {
6519
	case HCI_PRIMARY:
6520 6521 6522
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6523
			ev.type = 0x01;
6524 6525 6526
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6527
			ev.type = 0x00;
6528 6529
		}
		break;
6530 6531 6532 6533 6534
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6535
	}
6536 6537 6538 6539 6540

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6541 6542
}

6543
void mgmt_index_removed(struct hci_dev *hdev)
6544
{
6545
	struct mgmt_ev_ext_index ev;
6546
	u8 status = MGMT_STATUS_INVALID_INDEX;
6547

6548 6549 6550
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6551
	switch (hdev->dev_type) {
6552
	case HCI_PRIMARY:
6553
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6554

6555 6556 6557
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6558
			ev.type = 0x01;
6559 6560 6561
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6562
			ev.type = 0x00;
6563 6564
		}
		break;
6565 6566 6567 6568 6569
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6570
	}
6571 6572 6573 6574 6575

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6576 6577
}

6578
/* This function requires the caller holds hdev->lock */
6579
static void restart_le_actions(struct hci_dev *hdev)
6580 6581 6582 6583
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
6584 6585 6586 6587 6588 6589
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
6590
		case HCI_AUTO_CONN_DIRECT:
6591 6592 6593 6594 6595 6596 6597 6598
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
6599
		}
6600 6601 6602
	}
}

6603
void mgmt_power_on(struct hci_dev *hdev, int err)
6604 6605 6606
{
	struct cmd_lookup match = { NULL, hdev };

6607
	BT_DBG("err %d", err);
6608

6609 6610 6611
	hci_dev_lock(hdev);

	if (!err) {
6612 6613
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
6614 6615
	}

6616 6617 6618 6619 6620 6621 6622
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

6623
	hci_dev_unlock(hdev);
6624
}
6625

6626
void __mgmt_power_off(struct hci_dev *hdev)
6627 6628
{
	struct cmd_lookup match = { NULL, hdev };
6629
	u8 status, zero_cod[] = { 0, 0, 0 };
6630

6631
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6632 6633 6634 6635 6636 6637 6638 6639

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
6640
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6641 6642 6643 6644 6645
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6646

6647
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
6648 6649 6650
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
6651 6652
		ext_info_changed(hdev, NULL);
	}
6653

6654
	new_settings(hdev, match.sk);
6655 6656 6657

	if (match.sk)
		sock_put(match.sk);
6658
}
6659

6660
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6661
{
6662
	struct mgmt_pending_cmd *cmd;
6663 6664
	u8 status;

6665
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6666
	if (!cmd)
6667
		return;
6668 6669 6670 6671 6672 6673

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

6674
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6675 6676 6677 6678

	mgmt_pending_remove(cmd);
}

6679 6680
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
6681
{
6682
	struct mgmt_ev_new_link_key ev;
6683

6684
	memset(&ev, 0, sizeof(ev));
6685

6686
	ev.store_hint = persistent;
6687
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6688
	ev.key.addr.type = BDADDR_BREDR;
6689
	ev.key.type = key->type;
6690
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6691
	ev.key.pin_len = key->pin_len;
6692

6693
	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6694
}
6695

6696 6697
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
6711 6712 6713 6714

	return MGMT_LTK_UNAUTHENTICATED;
}

6715
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6716 6717 6718 6719 6720
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

6721
	/* Devices using resolvable or non-resolvable random addresses
F
Florian Grandel 已提交
6722
	 * without providing an identity resolving key don't require
6723 6724 6725 6726 6727 6728 6729 6730 6731
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
6732 6733 6734 6735
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
6736
		ev.store_hint = persistent;
6737

6738
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6739
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6740
	ev.key.type = mgmt_ltk_type(key);
6741 6742
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
6743
	ev.key.rand = key->rand;
6744

6745
	if (key->type == SMP_LTK)
6746 6747
		ev.key.master = 1;

6748 6749 6750
	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
6751
	memcpy(ev.key.val, key->val, key->enc_size);
6752 6753
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);
6754

6755
	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6756 6757
}

6758
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6759 6760 6761 6762 6763
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

6764
	ev.store_hint = persistent;
6765

6766 6767 6768 6769 6770 6771 6772 6773
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

6774 6775
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
6776 6777 6778 6779 6780 6781
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
F
Florian Grandel 已提交
6782
	 * without providing an identity resolving key don't require
6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
6794
		ev.store_hint = persistent;
6795 6796 6797

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6798
	ev.key.type = csrk->type;
6799 6800 6801 6802 6803
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

6804
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6805 6806
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
6807 6808 6809
{
	struct mgmt_ev_new_conn_param ev;

6810 6811 6812
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

6813 6814 6815
	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6816
	ev.store_hint = store_hint;
6817 6818 6819 6820 6821 6822 6823 6824
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

6825 6826
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
6827
{
6828 6829 6830
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
6831

6832 6833
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6834

6835
	ev->flags = __cpu_to_le32(flags);
6836

6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848
	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);
6849

6850
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6851 6852 6853 6854
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}
6855

6856
	ev->eir_len = cpu_to_le16(eir_len);
6857

6858 6859
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
6860 6861
}

6862
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6863 6864 6865
{
	struct sock **sk = data;

6866
	cmd->cmd_complete(cmd, 0);
6867 6868 6869 6870

	*sk = cmd->sk;
	sock_hold(*sk);

6871
	mgmt_pending_remove(cmd);
6872 6873
}

6874
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6875
{
6876
	struct hci_dev *hdev = data;
6877
	struct mgmt_cp_unpair_device *cp = cmd->param;
6878

6879 6880
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

6881
	cmd->cmd_complete(cmd, 0);
6882 6883 6884
	mgmt_pending_remove(cmd);
}

6885 6886
bool mgmt_powering_down(struct hci_dev *hdev)
{
6887
	struct mgmt_pending_cmd *cmd;
6888 6889
	struct mgmt_mode *cp;

6890
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6891 6892 6893 6894 6895 6896 6897 6898 6899 6900
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

6901
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6902 6903
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
6904
{
6905
	struct mgmt_ev_device_disconnected ev;
6906 6907
	struct sock *sk = NULL;

6908 6909 6910 6911 6912 6913
	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
6914 6915
	}

6916 6917 6918
	if (!mgmt_connected)
		return;

6919 6920 6921
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

6922
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6923

6924 6925 6926
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;
6927

6928
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6929 6930

	if (sk)
6931
		sock_put(sk);
6932

6933
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6934
			     hdev);
6935 6936
}

6937 6938
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
6939
{
6940 6941
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
6942
	struct mgmt_pending_cmd *cmd;
6943

6944 6945 6946
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

6947
	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
6948
	if (!cmd)
6949
		return;
6950

6951 6952 6953 6954 6955 6956 6957 6958
	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

6959
	cmd->cmd_complete(cmd, mgmt_status(status));
6960
	mgmt_pending_remove(cmd);
6961
}
6962

6963 6964
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
6965 6966
{
	struct mgmt_ev_connect_failed ev;
6967

6968 6969 6970 6971 6972 6973
	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
6974
	}
6975

6976
	bacpy(&ev.addr.bdaddr, bdaddr);
6977
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6978
	ev.status = mgmt_status(status);
6979

6980
	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6981
}
6982

6983
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6984 6985 6986
{
	struct mgmt_ev_pin_code_request ev;

6987
	bacpy(&ev.addr.bdaddr, bdaddr);
6988
	ev.addr.type = BDADDR_BREDR;
6989
	ev.secure = secure;
6990

6991
	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6992 6993
}

6994 6995
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
6996
{
6997
	struct mgmt_pending_cmd *cmd;
6998

6999
	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7000
	if (!cmd)
7001
		return;
7002

7003
	cmd->cmd_complete(cmd, mgmt_status(status));
7004
	mgmt_pending_remove(cmd);
7005 7006
}

7007 7008
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
7009
{
7010
	struct mgmt_pending_cmd *cmd;
7011

7012
	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7013
	if (!cmd)
7014
		return;
7015

7016
	cmd->cmd_complete(cmd, mgmt_status(status));
7017
	mgmt_pending_remove(cmd);
7018
}
7019

7020
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7021
			      u8 link_type, u8 addr_type, u32 value,
7022
			      u8 confirm_hint)
7023 7024 7025
{
	struct mgmt_ev_user_confirm_request ev;

7026
	BT_DBG("%s", hdev->name);
7027

7028
	bacpy(&ev.addr.bdaddr, bdaddr);
7029
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7030
	ev.confirm_hint = confirm_hint;
7031
	ev.value = cpu_to_le32(value);
7032

7033
	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7034
			  NULL);
7035 7036
}

7037
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7038
			      u8 link_type, u8 addr_type)
7039 7040 7041 7042 7043
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

7044
	bacpy(&ev.addr.bdaddr, bdaddr);
7045
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7046 7047

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7048
			  NULL);
7049 7050
}

7051
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7052 7053
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
7054
{
7055
	struct mgmt_pending_cmd *cmd;
7056

7057
	cmd = pending_find(opcode, hdev);
7058 7059 7060
	if (!cmd)
		return -ENOENT;

7061
	cmd->cmd_complete(cmd, mgmt_status(status));
7062
	mgmt_pending_remove(cmd);
7063

7064
	return 0;
7065 7066
}

7067
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7068
				     u8 link_type, u8 addr_type, u8 status)
7069
{
7070
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7071
					  status, MGMT_OP_USER_CONFIRM_REPLY);
7072 7073
}

7074
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7075
					 u8 link_type, u8 addr_type, u8 status)
7076
{
7077
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7078 7079
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
7080
}
7081

7082
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7083
				     u8 link_type, u8 addr_type, u8 status)
7084
{
7085
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7086
					  status, MGMT_OP_USER_PASSKEY_REPLY);
7087 7088
}

7089
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7090
					 u8 link_type, u8 addr_type, u8 status)
7091
{
7092
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7093 7094
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
7095 7096
}

7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

7113
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7114 7115
{
	struct mgmt_ev_auth_failed ev;
7116
	struct mgmt_pending_cmd *cmd;
7117
	u8 status = mgmt_status(hci_status);
7118

7119 7120 7121
	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;
7122

7123 7124 7125 7126 7127
	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

7128 7129 7130 7131
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
7132
}
7133

7134
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7135 7136
{
	struct cmd_lookup match = { NULL, hdev };
7137
	bool changed;
7138 7139 7140 7141

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7142
				     cmd_status_rsp, &mgmt_err);
7143
		return;
7144 7145
	}

7146
	if (test_bit(HCI_AUTH, &hdev->flags))
7147
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7148
	else
7149
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7150

7151
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7152
			     &match);
7153

7154
	if (changed)
7155
		new_settings(hdev, match.sk);
7156 7157 7158 7159 7160

	if (match.sk)
		sock_put(match.sk);
}

7161
static void clear_eir(struct hci_request *req)
7162
{
7163
	struct hci_dev *hdev = req->hdev;
7164 7165
	struct hci_cp_write_eir cp;

7166
	if (!lmp_ext_inq_capable(hdev))
7167
		return;
7168

7169 7170
	memset(hdev->eir, 0, sizeof(hdev->eir));

7171 7172
	memset(&cp, 0, sizeof(cp));

7173
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7174 7175
}

7176
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7177 7178
{
	struct cmd_lookup match = { NULL, hdev };
7179
	struct hci_request req;
7180
	bool changed = false;
7181 7182 7183

	if (status) {
		u8 mgmt_err = mgmt_status(status);
7184

7185 7186
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
7187
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7188
			new_settings(hdev, NULL);
7189
		}
7190

7191 7192
		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
7193
		return;
7194 7195 7196
	}

	if (enable) {
7197
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7198
	} else {
7199
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7200
		if (!changed)
7201 7202
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
7203
		else
7204
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7205 7206 7207 7208
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

7209
	if (changed)
7210
		new_settings(hdev, match.sk);
7211

7212
	if (match.sk)
7213 7214
		sock_put(match.sk);

7215 7216
	hci_req_init(&req, hdev);

7217 7218
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7219 7220
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
7221
		__hci_req_update_eir(&req);
7222
	} else {
7223
		clear_eir(&req);
7224
	}
7225 7226

	hci_req_run(&req, NULL);
7227 7228
}

7229
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7230 7231 7232 7233 7234 7235 7236 7237 7238
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

7239 7240
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
7241
{
7242
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7243

7244 7245 7246
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7247

7248
	if (!status) {
7249 7250
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7251 7252
		ext_info_changed(hdev, NULL);
	}
7253 7254 7255

	if (match.sk)
		sock_put(match.sk);
7256 7257
}

7258
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7259 7260
{
	struct mgmt_cp_set_local_name ev;
7261
	struct mgmt_pending_cmd *cmd;
7262

7263
	if (status)
7264
		return;
7265 7266 7267

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7268
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7269

7270
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7271 7272
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7273

7274 7275 7276
		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
7277
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
7278
			return;
7279
	}
7280

7281 7282
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
7283
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
7284
}
7285

7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

7298 7299
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
7317
				memcpy(uuid, bluetooth_base_uuid, 16);
7318 7319 7320 7321 7322 7323 7324 7325 7326
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
7327
				memcpy(uuid, bluetooth_base_uuid, 16);
7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

7350 7351 7352
	return false;
}

7353 7354 7355
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
7356
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7357 7358 7359 7360 7361 7362 7363
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

7364
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7365 7366 7367
			   DISCOV_LE_RESTART_DELAY);
}

7368 7369
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7370
{
7371 7372 7373 7374 7375
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
7376 7377 7378
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
7379 7380
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7381 7382 7383
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7384
		return  false;
7385

7386 7387 7388
	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
7389
		 */
7390 7391 7392 7393 7394 7395
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
7396
	}
7397

7398 7399
	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
7400
	 */
7401 7402 7403 7404 7405 7406 7407 7408
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}
7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431

	return true;
}

/* Emit a MGMT_EV_DEVICE_FOUND event for a discovered remote device.
 *
 * Applies the discovery filters (RSSI/UUID service filtering and the
 * limited discoverable check), then builds an event containing the
 * EIR/advertising data, an optional Class of Device field and any scan
 * response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Service discovery in progress: drop non-matching results */
	if (hdev->discovery.result_filtering &&
	    !is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, scan_rsp_len))
		return;

	/* Limited discovery: require the limited discoverable bit,
	 * taken from the Class of Device when available, otherwise from
	 * the advertising flags.
	 */
	if (hdev->discovery.limited) {
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *adv_flags = eir_get_data(eir, eir_len, EIR_FLAGS,
						     NULL);
			if (!adv_flags || !(adv_flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	/* Copy EIR or advertising data into the event */
	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device unless the EIR already carries one */
	if (dev_class &&
	    !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Append scan response data right after the EIR data */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

7495 7496
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7497
{
7498 7499 7500
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;
7501

7502
	ev = (struct mgmt_ev_device_found *) buf;
7503

7504 7505 7506
	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
7507
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7508 7509 7510
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7511
				  name_len);
7512

7513
	ev->eir_len = cpu_to_le16(eir_len);
7514

7515
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7516
}
7517

7518
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7519
{
7520
	struct mgmt_ev_discovering ev;
7521

7522 7523
	BT_DBG("%s discovering %u", hdev->name, discovering);

7524 7525 7526 7527
	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

7528
	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7529
}
7530

7531 7532 7533 7534
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
7535
	.hdev_init	= mgmt_init_hdev,
7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546
};

/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the mgmt control channel on module/subsystem teardown */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}