mgmt.c 203.6 KB
Newer Older
1 2
/*
   BlueZ - Bluetooth protocol stack for Linux
3

4
   Copyright (C) 2010  Nokia Corporation
5
   Copyright (C) 2011-2012 Intel Corporation
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

27
#include <linux/module.h>
28 29 30 31
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
32
#include <net/bluetooth/hci_sock.h>
33
#include <net/bluetooth/l2cap.h>
34
#include <net/bluetooth/mgmt.h>
35

36
#include "hci_request.h"
37
#include "smp.h"
38
#include "mgmt_util.h"
39

40
/* Version/revision reported by MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	9
42

43 44 45 46 47 48 49
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
50
	MGMT_OP_SET_BONDABLE,
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
81
	MGMT_OP_SET_DEVICE_ID,
82
	MGMT_OP_SET_ADVERTISING,
83
	MGMT_OP_SET_BREDR,
84
	MGMT_OP_SET_STATIC_ADDRESS,
85
	MGMT_OP_SET_SCAN_PARAMS,
86
	MGMT_OP_SET_SECURE_CONN,
87
	MGMT_OP_SET_DEBUG_KEYS,
88
	MGMT_OP_SET_PRIVACY,
89
	MGMT_OP_LOAD_IRKS,
90
	MGMT_OP_GET_CONN_INFO,
91
	MGMT_OP_GET_CLOCK_INFO,
92 93
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
94
	MGMT_OP_LOAD_CONN_PARAM,
95
	MGMT_OP_READ_UNCONF_INDEX_LIST,
96
	MGMT_OP_READ_CONFIG_INFO,
97
	MGMT_OP_SET_EXTERNAL_CONFIG,
98
	MGMT_OP_SET_PUBLIC_ADDRESS,
99
	MGMT_OP_START_SERVICE_DISCOVERY,
100
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101
	MGMT_OP_READ_EXT_INDEX_LIST,
102
	MGMT_OP_READ_ADV_FEATURES,
103
	MGMT_OP_ADD_ADVERTISING,
104
	MGMT_OP_REMOVE_ADVERTISING,
105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
128
	MGMT_EV_PASSKEY_NOTIFY,
129
	MGMT_EV_NEW_IRK,
130
	MGMT_EV_NEW_CSRK,
131 132
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
133
	MGMT_EV_NEW_CONN_PARAM,
134
	MGMT_EV_UNCONF_INDEX_ADDED,
135
	MGMT_EV_UNCONF_INDEX_REMOVED,
136
	MGMT_EV_NEW_CONFIG_OPTIONS,
137 138
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
139
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 141
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
142 143
};

144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164
/* Subset of opcodes that untrusted (non-privileged) sockets may issue;
 * all are read-only information queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};

/* Subset of events that untrusted (non-privileged) sockets receive;
 * limited to index and configuration/name change notifications.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};

165
/* Lifetime of cached discovery results, in jiffies (2 seconds) */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

170 171 172 173 174 175 176 177
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
178
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Translate an HCI status code to the corresponding MGMT status.
 * Codes beyond the table map to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}

243 244
/* Broadcast an index-related event on the control channel; @flag selects
 * the receiving sockets (see mgmt_send_event). No socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

250 251 252 253 254 255 256
/* Broadcast @event on the control channel; @flag selects the receiving
 * sockets (see mgmt_send_event) and @skip_sk is excluded.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

257 258 259 260 261 262 263
/* Broadcast @event on the control channel using the HCI_MGMT_GENERIC_EVENTS
 * flag; @skip_sk is excluded.
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

264 265 266 267
/* Broadcast @event on the control channel to trusted sockets only;
 * @skip_sk is excluded.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

271 272
/* MGMT_OP_READ_VERSION: report the management interface version/revision. */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

285 286
/* MGMT_OP_READ_COMMANDS: report the opcodes and events available to this
 * socket. Trusted sockets see the full tables, untrusted sockets the
 * restricted ones.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes are packed little-endian: commands first, then events */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

337 338
/* MGMT_OP_READ_INDEX_LIST: report the indices of all configured BR/EDR
 * controllers, skipping those in setup/config or claimed by user channel.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound the allocation size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that actually qualify */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* The second pass may have skipped entries; report the real count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412
/* MGMT_OP_READ_UNCONF_INDEX_LIST: report the indices of all BR/EDR
 * controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound the allocation size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that actually qualify */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* The second pass may have skipped entries; report the real count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532
/* MGMT_OP_READ_EXT_INDEX_LIST: report all BR/EDR and AMP controller
 * indices, with a per-entry type (0x00 configured BR/EDR, 0x01
 * unconfigured BR/EDR, 0x02 AMP) and bus. Calling this switches the
 * socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound the allocation size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries that actually qualify */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

533 534 535
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 538 539 540 541 542 543 544 545
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

546 547 548 549
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

550
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 553
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

554 555 556 557 558 559 560
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

561 562 563 564
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

565 566
	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
567 568
}

569 570 571 572
/* Complete @opcode with the current missing-options mask as response. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

577 578 579 580
/* MGMT_OP_READ_CONFIG_INFO: report manufacturer plus the supported and
 * still-missing configuration options for this controller.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

605 606 607 608 609
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
610
	settings |= MGMT_SETTING_BONDABLE;
611
	settings |= MGMT_SETTING_DEBUG_KEYS;
612 613
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;
614

615
	if (lmp_bredr_capable(hdev)) {
616 617
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 619
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
620 621 622 623 624

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
625

626
		if (lmp_sc_capable(hdev))
627
			settings |= MGMT_SETTING_SECURE_CONN;
628
	}
629

630
	if (lmp_le_capable(hdev)) {
631
		settings |= MGMT_SETTING_LE;
632
		settings |= MGMT_SETTING_ADVERTISING;
633
		settings |= MGMT_SETTING_SECURE_CONN;
634
		settings |= MGMT_SETTING_PRIVACY;
635
		settings |= MGMT_SETTING_STATIC_ADDRESS;
636
	}
637

638 639
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
640 641
		settings |= MGMT_SETTING_CONFIGURATION;

642 643 644 645 646 647 648
	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

649
	if (hdev_is_powered(hdev))
650 651
		settings |= MGMT_SETTING_POWERED;

652
	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 654
		settings |= MGMT_SETTING_CONNECTABLE;

655
	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 657
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

658
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 660
		settings |= MGMT_SETTING_DISCOVERABLE;

661
	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662
		settings |= MGMT_SETTING_BONDABLE;
663

664
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 666
		settings |= MGMT_SETTING_BREDR;

667
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 669
		settings |= MGMT_SETTING_LE;

670
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 672
		settings |= MGMT_SETTING_LINK_SECURITY;

673
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 675
		settings |= MGMT_SETTING_SSP;

676
	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 678
		settings |= MGMT_SETTING_HS;

679
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 681
		settings |= MGMT_SETTING_ADVERTISING;

682
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 684
		settings |= MGMT_SETTING_SECURE_CONN;

685
	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 687
		settings |= MGMT_SETTING_DEBUG_KEYS;

688
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 690
		settings |= MGMT_SETTING_PRIVACY;

691 692 693 694 695
	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
696
	 * will never be set. If the address is configured, then if the
697 698 699 700 701 702
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
703
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 706 707 708 709
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

710 711 712
	return settings;
}

713 714
/* PnP Information service class UUID16; filtered out of EIR UUID lists */
#define PNP_INFO_SVCLASS_ID		0x1200

715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

823 824 825 826 827 828 829 830 831 832 833 834
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

/* As pending_find(), additionally matching on the command's @data
 * (see mgmt_pending_find_data).
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

835 836 837 838 839 840 841 842 843 844 845 846 847 848
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0x01;

	return 0x00;
}

849
/* Build the default scan response payload into @ptr: the local device
 * name, shortened (EIR_NAME_SHORT) when it does not fit. Returns the
 * number of bytes written.
 */
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

875 876 877 878 879 880 881 882 883 884 885
/* Copy the advertising instance's scan response data into @ptr and
 * return its length.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 rsp_len = hdev->adv_instance.scan_rsp_len;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, hdev->adv_instance.scan_rsp_data, rsp_len);

	return rsp_len;
}

886
/* Queue an HCI command updating the scan response data for @instance,
 * unless LE is disabled or the data is unchanged.
 */
static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

914 915
static void update_scan_rsp_data(struct hci_request *req)
{
916
	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
917 918
}

919 920
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
921
	struct mgmt_pending_cmd *cmd;
922 923 924 925

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
926
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
927 928 929 930 931 932 933
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
934
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
935
			return LE_AD_LIMITED;
936
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
937 938 939 940 941 942
			return LE_AD_GENERAL;
	}

	return 0;
}

943 944 945
static bool get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
946

947 948 949 950 951 952
	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
953

954
		return cp->val;
955 956
	}

957 958
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
959

960 961 962
/* Return the MGMT advertising flags for @instance. Only instances 0x00
 * and 0x01 are supported; anything else yields 0.
 */
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;

	if (instance > 0x01)
		return 0;

	if (instance == 0x01)
		return hdev->adv_instance.flags;

	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
	 * to the "connectable" instance flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		flags |= MGMT_ADV_FLAG_CONNECTABLE;

	return flags;
}

982 983 984 985 986 987 988 989 990 991 992 993
/* Return the scan response length for @instance; only instance 0x01
 * carries instance scan response data.
 */
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return instance == 0x01 ? hdev->adv_instance.scan_rsp_len : 0;
}

994
/* Build the advertising data payload for @instance into @ptr: the
 * "Flags" field (when applicable), the instance data, and "Tx Power".
 * Returns the number of bytes written.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	u32 instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (instance) {
		memcpy(ptr, hdev->adv_instance.adv_data,
		       hdev->adv_instance.adv_data_len);

		ad_len += hdev->adv_instance.adv_data_len;
		ptr += hdev->adv_instance.adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

/* Queue an LE Set Advertising Data command for @instance, but only if
 * LE is enabled and the newly built data differs from the data the
 * controller already has (cached in hdev->adv_data).
 */
static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* Refresh advertising data for whichever instance is currently active. */
static void update_adv_data(struct hci_request *req)
{
	update_inst_adv_data(req, get_current_adv_instance(req->hdev));
}

/* Build and submit a standalone HCI request refreshing the advertising
 * data. Returns the hci_req_run() result.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}

/* Assemble the Extended Inquiry Response payload into @data: local name
 * (shortened when longer than 48 bytes), inquiry TX power, Device ID
 * and the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each helper respects the remaining space in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

/* Queue a Write Extended Inquiry Response command when the EIR data
 * needs updating. Skipped while powered off, without EIR support, with
 * SSP disabled, or while the service cache is still active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the controller already has identical EIR data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

/* Queue a Write Class of Device command when the computed class
 * (minor/major plus service-class bits, with the limited-discoverable
 * bit folded in) differs from the cached controller value.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable bit in the major class byte */
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

/* Queue an LE Set Advertise Enable command turning advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

/* Queue the commands needed to (re)enable LE advertising with the
 * parameters of the current advertising instance. A no-op while an LE
 * connection exists or if updating the random address fails.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restart advertising if it was already on */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

/* Delayed work: drop the service cache and push the now-current EIR and
 * class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

/* Delayed work: mark the Resolvable Private Address as expired and, if
 * advertising is enabled, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

/* One-time per-controller mgmt setup; runs only the first time the
 * HCI_MGMT flag gets set for @hdev.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}

/* Handler for the Read Controller Information mgmt command: reply with
 * address, version, manufacturer, settings, class and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

/* Complete @opcode with the controller's current settings bitmask. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

/* Completion callback for clean_up_hci_state(): once no connections
 * remain, power off right away instead of waiting for the timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

/* Queue the commands needed to stop the current discovery activity
 * (inquiry, LE scan or name resolution, depending on state).
 *
 * Returns true if any stop command was queued, false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}

1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

/* Emit the Advertising Removed mgmt event for @instance. */
static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

/* Remove the configured advertising instance and, unless regular
 * advertising is active (or the device is powered off), stop
 * advertising altogether.
 */
static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}

/* Queue everything needed to quiesce the controller before powering
 * off: disable scans and advertising, stop discovery, and disconnect,
 * cancel or reject every connection depending on its state.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing was queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hdev->adv_instance_timeout)
		clear_adv_instance(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}

/* Handler for the Set Powered mgmt command. */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Leaving auto-off: cancel the pending power off and, when
	 * powering on, just report the powered state.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Broadcast a New Settings event, skipping the @skip socket. */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
}

/* Broadcast a New Settings event to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

/* Shared context for the pending-command callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (held by settings_rsp) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};

/* Pending-command callback: answer with the current settings, remember
 * (and hold a reference to) the first socket seen in the cmd_lookup
 * passed via @data, then free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

/* Pending-command callback: fail the command with *(u8 *)data as the
 * status and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

/* Pending-command callback: prefer the command's own cmd_complete
 * handler when set, otherwise fall back to a plain status reply.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

/* Generic cmd_complete handler: echo the full original parameters. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

1664 1665 1666 1667
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1668
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1669 1670 1671 1672 1673 1674 1675 1676 1677
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1678
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1679 1680 1681 1682 1683
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

/* HCI request completion handler for Set Discoverable: update flags,
 * arm the discoverable timeout, answer the pending command and sync
 * page scan plus class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for the Set Discoverable mgmt command.
 *
 * val: 0x00 = off (timeout must be 0), 0x01 = general, 0x02 = limited
 * (timeout required). Requires LE or BR/EDR to be enabled and the
 * controller to be connectable.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Queue page scan activity/type updates: interlaced fast page scan
 * (160 ms interval) when @enable, otherwise the standard 1.28 s
 * default. Only values differing from the cached settings are written.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* HCI request completion handler for Set Connectable: update flags,
 * answer the pending command and resync page/background scanning and
 * advertising data as needed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		/* Disabling connectable also clears discoverable */
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Flag-only path for Set Connectable (no HCI commands needed): update
 * the flags, send the settings response and broadcast New Settings if
 * anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also clears discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

/* Handler for the Set Connectable mgmt command. */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags need changing */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA means there were no HCI commands queued; fall
		 * back to the flag-only update path.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Handler for the Set Bondable mgmt command (flag-only, no HCI
 * traffic).
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2150 2151
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link
 * level security (legacy authentication). When the controller is
 * powered, this is done by issuing HCI Write Authentication Enable;
 * when it is off, only the host-side flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just record the desired state in the
		 * host flag; it will be programmed at power on.
		 */
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security operation may be in flight */
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: reply without touching HCI */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The pending command is completed from the HCI event handler */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2219
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing.
 * Disabling SSP also implies disabling High Speed support since HS
 * depends on SSP being available.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: only flip host flags; the controller is
		 * configured accordingly during power on.
		 */
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* Turning SSP off also turns HS off. "changed"
			 * must reflect either flag transitioning.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply without touching HCI */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When disabling SSP, also turn off debug mode if it was on.
	 * The result of this command is intentionally not checked.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2300
/* Handler for MGMT_OP_SET_HS: enable or disable High Speed (AMP)
 * support. HS is a host-side flag only; it requires SSP to be enabled
 * and cannot be disabled while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled first */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP operation could change the prerequisite
	 * state underneath us, so refuse to race with it.
	 */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is rejected */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2357
/* HCI request completion callback for set_le(): respond to all pending
 * Set LE commands and, on success, refresh advertising data and the
 * background scan state now that LE host support has changed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp collects the originating socket into match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}

2397
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support by
 * programming the LE Host Supported feature bit. LE-only controllers
 * cannot have LE switched off.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		/* No HCI traffic needed: either the controller is off
		 * (flags get programmed at power on) or the host
		 * feature bit already matches the request.
		 */
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling advertising as well */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set Advertising also touches LE state; don't race with it */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop ongoing advertising before disabling LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2498 2499 2500 2501 2502 2503 2504 2505
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
2506
	struct mgmt_pending_cmd *cmd;
2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539
/* Bluetooth Base UUID in little-endian order; 16- and 32-bit UUIDs are
 * aliases built on top of it.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the shortest representation (16, 32 or 128 bits) of the given
 * 128-bit little-endian UUID.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 shortened;

	/* Anything not built on the base UUID is a full 128-bit UUID */
	if (memcmp(uuid, bluetooth_base_uuid, 12) != 0)
		return 128;

	/* The distinguishing part lives in the top four bytes */
	shortened = get_unaligned_le32(&uuid[12]);

	return (shortened > 0xffff) ? 32 : 16;
}

2540 2541
/* Complete a pending class/EIR related mgmt command (Add/Remove UUID,
 * Set Device Class), replying with the current Class of Device value.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (cmd) {
		/* Userspace always gets the device class that is now
		 * in effect, regardless of success or failure.
		 */
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

2559
/* HCI request completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

2566
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and update the
 * Class of Device and EIR data on the controller accordingly.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were needed (e.g. the
		 * values didn't change), so complete immediately.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	/* Completion is deferred to add_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

2624 2625 2626 2627 2628
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2629
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2630 2631
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2632 2633 2634 2635 2636 2637
		return true;
	}

	return false;
}

2638
/* HCI request completion callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2645
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero wildcard UUID is given) and refresh the Class
 * of Device and EIR data on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: flush the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed, the actual class/EIR
		 * update is deferred until the cache times out.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI traffic was required, complete now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Completion is deferred to remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2724
/* HCI request completion callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2731
/* Handler for MGMT_OP_SET_DEV_CLASS: update the major/minor Class of
 * Device and program the resulting value into the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		/* Programmed into the controller at power on */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the cache worker to
		 * finish; cancel_delayed_work_sync() may sleep and the
		 * worker itself takes hdev's lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI traffic was required, complete now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Completion is deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2802
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link
 * keys with the list supplied by userspace. The whole list is validated
 * before any existing keys are dropped.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* 0x08 is the highest defined link key type here */
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

2884
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2885
			   u8 addr_type, struct sock *skip_sk)
2886 2887 2888 2889 2890 2891 2892
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2893
			  skip_sk);
2894 2895
}

2896
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the pairing data (link key,
 * IRK, LTK) for a device and optionally terminate an active connection
 * to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the address that was acted upon */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* A negative result from key removal means nothing was paired */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Completion is deferred until the disconnect event arrives */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3011
/* Handler for MGMT_OP_DISCONNECT: terminate the BR/EDR or LE connection
 * to the given remote address. Completion is deferred until the
 * disconnection actually happens.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3076
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type exposed to userspace.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link maps to BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}

3095 3096
/* Handler for MGMT_OP_GET_CONNECTIONS: return the addresses of all
 * currently connected (mgmt-visible) devices, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count the connections to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill the reply. SCO/eSCO entries are filtered
	 * here (the slot is not advanced, so a later entry overwrites
	 * it), which can leave the buffer larger than needed.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3153
/* Send an HCI PIN Code Negative Reply for the given address and track
 * it as a pending mgmt command. Caller must hold the hdev lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* Only the bdaddr part of the address is sent on the wire */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

3172
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a userspace-supplied PIN
 * code to the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; otherwise reject
	 * the pairing towards the remote with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3234 3235
/* Handler for MGMT_OP_SET_IO_CAPABILITY: record the IO capability that
 * will be advertised during SSP/SMP pairing. Host-side state only, so
 * no HCI command is needed here.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

3258
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3259 3260
{
	struct hci_dev *hdev = conn->hdev;
3261
	struct mgmt_pending_cmd *cmd;
3262

3263
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

3276
/* Finish a Pair Device command: send the command-complete event to the
 * requesting socket, detach the pairing callbacks from the connection and
 * release the reference taken when the command was queued.
 *
 * Returns the result of sending the management reply.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}

/* Called by the SMP layer when pairing over SMP finishes; completes any
 * pending Pair Device command for this connection with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}

/* Connection-event callback for BR/EDR pairing: finish the pending Pair
 * Device command with the (translated) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Connection-event callback for LE pairing. Unlike BR/EDR, a successful
 * connection alone does not mean pairing is done, so a zero status is
 * ignored here; success is reported via mgmt_smp_complete() instead.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
	} else {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}

/* Handle the Pair Device management command: validate the request,
 * establish an ACL or LE connection to the target and register pairing
 * callbacks so the command completes once pairing finishes.
 *
 * Returns 0 on success (the reply is sent asynchronously or immediately
 * via mgmt_cmd_complete) or a negative errno on internal failure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connect errnos onto management status codes */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Extra reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already connected and secure, complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* Handle the Cancel Pair Device command: abort an in-progress Pair Device
 * request for the given address, completing it with status "cancelled".
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel request must target the same remote address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* Common helper for the user confirmation/passkey/PIN (negative) reply
 * commands. For LE links the response is handed to SMP directly; for
 * BR/EDR the corresponding HCI reply command (hci_op) is sent and the
 * management command completes from its command-complete handler.
 *
 * @mgmt_op: management opcode being answered (used for replies)
 * @hci_op:  HCI reply opcode to send for BR/EDR pairing
 * @passkey: numeric passkey, only used for HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

/* Reject a pending PIN code request for the given remote address. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *request = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &request->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

/* Accept a pending user confirmation request for the given address. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *reply = data;

	BT_DBG("");

	/* The command carries no variable-length payload */
	if (len != sizeof(*reply))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

/* Reject a pending user confirmation request for the given address. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

/* Answer a pending passkey request with the user-supplied passkey. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, reply->passkey);
}

/* Reject a pending passkey request for the given remote address. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

static void update_name(struct hci_request *req)
3664
{
3665
	struct hci_dev *hdev = req->hdev;
3666 3667
	struct hci_cp_write_local_name cp;

3668
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3669

3670
	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3671 3672
}

3673
/* Request-completion handler for Set Local Name: reply to the socket that
 * issued the pending command with either an error status or an echo of
 * the requested name parameters.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set Local Name command: store the short name immediately and,
 * when the controller is powered, push the full name out via HCI (local
 * name, EIR and LE scan response data as applicable).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never needs a controller update */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just remember the name and notify */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Completion handler for Read Local OOB Data: translate the HCI response
 * (legacy P-192 only, or extended P-192 + P-256) into the management reply
 * format. The reply is truncated to the 192-bit fields for the legacy
 * command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short (malformed) controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy command: omit the P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}

/* Handle the Read Local OOB Data command: queue the appropriate HCI read
 * (extended variant when BR/EDR Secure Connections is enabled) and reply
 * from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may be outstanding */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* The extended variant also returns the P-256 hash and randomizer */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* Handle the Add Remote OOB Data command. Two payload sizes are accepted:
 * the legacy form with P-192 hash/randomizer only (BR/EDR addresses only)
 * and the extended form that additionally carries the P-256 values.
 * Zeroed key pairs disable the corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is BR/EDR-specific */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* Handle the Remove Remote OOB Data command. BDADDR_ANY clears all stored
 * OOB data; otherwise only the entry for the given BR/EDR address is
 * removed.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* The wildcard address clears the whole OOB data store */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

/* Queue a BR/EDR general inquiry onto the request. Returns false and sets
 * *status to a management error code when inquiry cannot be started
 * (BR/EDR unsupported or inquiry already active).
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	/* Start discovery with a fresh result cache */
	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}

/* Queue the HCI commands needed to start an active LE scan with the given
 * interval: stop conflicting advertising/background scanning, pick a
 * private own-address and set scan parameters before enabling the scan.
 * Returns false and sets *status on failure.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}

/* Queue the HCI commands matching hdev->discovery.type: BR/EDR inquiry,
 * LE scan, or both for interleaved discovery. Returns false and sets
 * *status when discovery cannot be triggered.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

/* Request-completion handler for Start (Service) Discovery: complete the
 * pending management command, update the discovery state and, for scans
 * that involve LE, schedule the delayed work that will stop the scan.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* The trigger may belong to either discovery command variant */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* Pure inquiry stops on its own; no LE scan to disable */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Start Discovery command: validate state, register the pending
 * command and kick off the HCI request built by trigger_discovery(). The
 * final reply is sent from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

/* Completion callback for Start Service Discovery: reply with just the
 * first byte of the stored command parameters (presumably the address
 * type field, matching the 1-byte replies used on the error paths —
 * NOTE(review): confirm against struct mgmt_cp_start_service_discovery).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}

/* Handle the MGMT_OP_START_SERVICE_DISCOVERY command: validate the
 * request (power state, discovery state, UUID list length), store the
 * result filter (RSSI threshold plus UUID list) in hdev->discovery and
 * trigger the actual discovery procedure.
 *
 * Replies with mgmt_cmd_complete() carrying cp->type on every error
 * path; on success the final reply is sent later from
 * start_discovery_complete().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Largest UUID count that still fits in a u16-sized command
	 * payload (each UUID entry is 16 bytes).
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not while periodic
	 * inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The declared UUID count must exactly match the payload size. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep our own copy; the command buffer is not ours to
		 * hold on to after this handler returns.
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	/* trigger_discovery() returns false and fills in status when the
	 * request could not be built.
	 */
	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

4416
/* HCI request callback for Stop Discovery: answer the pending mgmt
 * command (if still queued) and, on success, mark discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	if (status == 0)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

4436
/* Handle the MGMT_OP_STOP_DISCOVERY command: validate that a discovery
 * of the requested type is actually running, queue a pending command
 * and issue the HCI operations that stop discovery.
 *
 * The success reply is normally sent from stop_discovery_complete();
 * only the -ENODATA case (nothing needed to be sent) replies here.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery currently running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	/* Request failed to run: drop the pending command again. */
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4494
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4495
			u16 len)
4496
{
4497
	struct mgmt_cp_confirm_name *cp = data;
4498 4499 4500
	struct inquiry_entry *e;
	int err;

4501
	BT_DBG("%s", hdev->name);
4502 4503 4504

	hci_dev_lock(hdev);

4505
	if (!hci_discovery_active(hdev)) {
4506 4507 4508
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
4509 4510 4511
		goto failed;
	}

4512
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4513
	if (!e) {
4514 4515 4516
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
4517 4518 4519 4520 4521 4522 4523 4524
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
4525
		hci_inquiry_cache_update_resolve(hdev, e);
4526 4527
	}

4528 4529
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
4530 4531 4532 4533 4534 4535

failed:
	hci_dev_unlock(hdev);
	return err;
}

4536
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4537
			u16 len)
4538
{
4539
	struct mgmt_cp_block_device *cp = data;
4540
	u8 status;
4541 4542
	int err;

4543
	BT_DBG("%s", hdev->name);
4544

4545
	if (!bdaddr_type_is_valid(cp->addr.type))
4546 4547 4548
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4549

4550
	hci_dev_lock(hdev);
4551

4552 4553
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4554
	if (err < 0) {
4555
		status = MGMT_STATUS_FAILED;
4556 4557 4558 4559 4560 4561
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4562

4563
done:
4564 4565
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4566

4567
	hci_dev_unlock(hdev);
4568 4569 4570 4571

	return err;
}

4572
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4573
			  u16 len)
4574
{
4575
	struct mgmt_cp_unblock_device *cp = data;
4576
	u8 status;
4577 4578
	int err;

4579
	BT_DBG("%s", hdev->name);
4580

4581
	if (!bdaddr_type_is_valid(cp->addr.type))
4582 4583 4584
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4585

4586
	hci_dev_lock(hdev);
4587

4588 4589
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4590
	if (err < 0) {
4591
		status = MGMT_STATUS_INVALID_PARAMS;
4592 4593 4594 4595 4596 4597
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4598

4599
done:
4600 4601
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4602

4603
	hci_dev_unlock(hdev);
4604 4605 4606 4607

	return err;
}

4608 4609 4610 4611
/* Handle the MGMT_OP_SET_DEVICE_ID command: store the Device ID record
 * (source, vendor, product, version) and refresh the EIR data so the
 * new record becomes visible.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	BT_DBG("%s", hdev->name);

	/* Source values above 0x0002 are not defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Rebuild the EIR data; the run result is intentionally ignored */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

4643 4644 4645 4646 4647 4648
/* Request callback used when re-enabling instance advertising; nothing
 * to do beyond logging the outcome.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}

4649 4650
/* HCI request callback for Set Advertising: on failure send a status
 * response to all pending Set Advertising commands; on success sync the
 * HCI_ADVERTISING flag with the controller state (HCI_LE_ADV), answer
 * the pending commands and emit New Settings. Finally, if advertising
 * was just turned off but an advertising instance had been configured,
 * re-enable the instance.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state into the
	 * mgmt-visible HCI_ADVERTISING flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	/* settings_rsp stores one socket reference in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}

4697 4698
/* Handle the MGMT_OP_SET_ADVERTISING command. cp->val may be 0x00
 * (off), 0x01 (on) or 0x02 (on and connectable). When no HCI traffic
 * is needed (not powered, no state change, LE connections present, or
 * an active LE scan is running) only the flags are toggled and a
 * response is sent directly; otherwise a pending command is queued and
 * an HCI request issued, completed by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* val collapses 0x01/0x02 to "advertising on" */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while a conflicting command is still in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_inst_adv_data(&req, 0x00);
		update_inst_scan_rsp_data(&req, 0x00);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4793 4794 4795 4796 4797 4798 4799 4800
/* Handle the MGMT_OP_SET_STATIC_ADDRESS command: store the random
 * static address to use for LE. Only allowed on LE-capable controllers
 * that are powered off. A non-clear address must not be BDADDR_NONE
 * and must have its two most significant bits set, as required for a
 * static random address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else must be a
	 * well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4837 4838 4839 4840 4841 4842 4843 4844 4845 4846
/* Handle the MGMT_OP_SET_SCAN_PARAMS command: validate and store the
 * LE scan interval and window, then restart an already-running
 * background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie within 0x0004-0x4000 and the scan window
	 * may not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}

4894 4895
/* HCI request callback for Set Fast Connectable: on failure reply with
 * an error status; on success sync the HCI_FAST_CONNECTABLE flag with
 * the value stored in the pending command, answer the requester and
 * emit New Settings.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stashed in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

4928
/* Handle the MGMT_OP_SET_FAST_CONNECTABLE command: requires BR/EDR to
 * be enabled and a controller of at least Bluetooth 1.2. When powered
 * off only the flag is toggled; otherwise the page scan parameters are
 * rewritten via an HCI request completed by
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No state change: just confirm the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only; the controller is updated
	 * when it gets powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

4993
/* HCI request callback for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag (it was set optimistically before the request
 * was issued) and reply with an error; on success answer the requester
 * and emit New Settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT_OP_SET_BREDR command on a dual-mode (BR/EDR + LE)
 * controller. Disabling BR/EDR while powered on is rejected, as is
 * re-enabling it when a static address or secure connections would
 * make the resulting configuration invalid. When powered off only
 * flags are toggled; otherwise an HCI request updates page scan and
 * advertising data, completed by set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change: just confirm the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also drops all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5137 5138
/* HCI request callback for Set Secure Connections: on failure reply
 * with an error; on success set the HCI_SC_ENABLED/HCI_SC_ONLY flags
 * according to the requested mode (0x00 off, 0x01 enabled, 0x02
 * SC-only), answer the requester and emit New Settings.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

5182 5183 5184 5185
/* Handle the MGMT_OP_SET_SECURE_CONN command. cp->val may be 0x00
 * (off), 0x01 (enabled) or 0x02 (SC-only mode). If no controller
 * interaction is needed (powered off, not SC-capable, or BR/EDR
 * disabled) the flags are toggled directly; otherwise a Write Secure
 * Connections Host Support HCI command is issued and the flags are
 * updated from sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, secure connections requires SSP to be enabled. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* val collapses 0x01/0x02 to "host support on" */
	val = !!cp->val;

	/* No state change: just confirm the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

5270 5271 5272 5273
/* Handle the MGMT_OP_SET_DEBUG_KEYS command. cp->val may be 0x00
 * (discard debug keys), 0x01 (keep them) or 0x02 (keep and actively
 * use debug keys). When the "use" mode changes on a powered controller
 * with SSP enabled, the SSP debug mode HCI command is sent as well.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool store_changed, mode_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		store_changed = !hci_dev_test_and_set_flag(hdev,
							   HCI_KEEP_DEBUG_KEYS);
	else
		store_changed = hci_dev_test_and_clear_flag(hdev,
							    HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		mode_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_USE_DEBUG_KEYS);
	else
		mode_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && mode_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (store_changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5317 5318 5319 5320 5321 5322 5323 5324 5325 5326
/* Handle the MGMT_OP_SET_PRIVACY command: enable or disable LE privacy
 * and store the provided Identity Resolving Key. Only permitted on
 * LE-capable controllers that are powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool privacy_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		privacy_changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		privacy_changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (privacy_changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386
/* Validate the address of an IRK entry: public LE addresses are always
 * acceptable; random LE addresses must be static (two most significant
 * bits set). Any other address type is invalid.
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	if (irk->addr.type == BDADDR_LE_PUBLIC)
		return true;

	if (irk->addr.type == BDADDR_LE_RANDOM)
		return (irk->addr.bdaddr.b[5] & 0xc0) == 0xc0;

	return false;
}

/* MGMT_OP_LOAD_IRKS: replace the complete set of stored Identity
 * Resolving Keys with the list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	/* IRKs only make sense on LE capable controllers */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the advertised count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Reject the whole load if any single entry is malformed */
	for (i = 0; i < irk_count; i++) {
		if (!irk_is_valid(&cp->irks[i]))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The provided list completely replaces the current one */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs itself */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5450 5451 5452 5453
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5467 5468
}

5469
/* MGMT_OP_LOAD_LONG_TERM_KEYS: replace the complete set of stored SMP
 * Long Term Keys with the list supplied by user space.
 *
 * Fix: the MGMT_LTK_P256_DEBUG case used to assign authenticated/type
 * and then fall through implicitly into "default: continue;", which
 * made the two assignments dead stores and read like a missing break.
 * The skip of debug keys is intentional (they must never be stored),
 * so make it explicit with its own "continue" and a comment.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the advertised count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole load if any single entry is malformed */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The provided list completely replaces the current one */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			/* Debug keys are deliberately never stored */
			continue;
		default:
			/* Silently skip unknown key types */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5561
/* Completion handler for MGMT_OP_GET_CONN_INFO: build the reply from
 * the cached connection values and release the connection references
 * taken when the command was queued.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* Echo back the address stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status != MGMT_STATUS_SUCCESS) {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	} else {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold/get references taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

5588 5589
/* HCI request callback for the RSSI/TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * command, looks up the matching pending mgmt command and completes it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was found as last sent: nothing to complete */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command was registered with the connection as its
	 * user_data, so match on that.
	 */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* MGMT_OP_GET_CONN_INFO: report RSSI, TX power and max TX power for a
 * connection. Cached values are returned directly if still fresh;
 * otherwise an HCI request is issued and the reply is deferred to
 * conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the requested address so error
	 * responses identify the device too.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding refresh per connection is allowed */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always the first command in the request;
		 * conn_info_refresh_complete() relies on this ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5762
/* Completion handler for MGMT_OP_GET_CLOCK_INFO: build the reply from
 * the cached clock values and release any connection references taken
 * when the command was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	/* On failure the clock fields remain zeroed */
	if (!status) {
		hdev = hci_dev_get(cmd->index);
		if (hdev) {
			rp.local_clock = cpu_to_le32(hdev->clock);
			hci_dev_put(hdev);
		}

		if (conn) {
			rp.piconet_clock = cpu_to_le32(conn->clock);
			rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		}
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold/get references taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}

5798
/* HCI request callback for the Read Clock request issued by
 * get_clock_info(). Recovers the target connection (if the piconet
 * clock was requested) and completes the matching pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was read; which == 0 is the local clock (no connection).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with the connection (or
	 * NULL) as its user_data, so match on that.
	 */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* MGMT_OP_GET_CLOCK_INFO: read the local clock and, if a non-ANY
 * BR/EDR address is given, the piconet clock of that connection. The
 * reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply with the requested address so error
	 * responses identify the device too.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects the piconet clock of that
	 * connection in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 (zeroed struct) reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964
/* Check whether an LE connection to the given address (with matching
 * destination address type) exists and is fully established.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	return conn && conn->dst_type == type && conn->state == BT_CONNECTED;
}

/* This function requires the caller holds hdev->lock.
 *
 * Set (or update) the auto-connect policy for the given LE address,
 * re-filing the connection parameters entry onto the matching action
 * list and triggering a background scan update where needed.
 *
 * Returns 0 on success (including a no-op update) or -EIO if the
 * parameters entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* hci_conn_params_add() returns an existing entry if one is
	 * already present for this address.
	 */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Drop the entry from whichever action list it is currently on
	 * before re-filing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there is not
		 * already an established one to this address.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976
/* Emit the Device Added mgmt event to all sockets except the one that
 * issued the command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5977
/* HCI request callback for add_device(): complete and remove the
 * pending Add Device command, if still present.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

5996 5997 5998 5999
/* MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for incoming
 * connections (action 0x01) or set up an LE auto-connect policy
 * (actions 0x00 report, 0x01 direct, 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099
/* Emit the Device Removed mgmt event to all sockets except the one
 * that issued the command.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

6100
/* HCI request callback for remove_device(): complete and remove the
 * pending Remove Device command, if still present.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

6119 6120 6121 6122
/* MGMT_OP_REMOVE_DEVICE: remove a single device from the whitelist or
 * its LE connection parameters, or — when BDADDR_ANY is given — wipe
 * the whole whitelist and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were not added through Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY only makes sense with address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are kept: they track devices known
		 * to the stack but not added via Add Device.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6246 6247 6248 6249
/* MGMT_OP_LOAD_CONN_PARAM: load a list of LE connection parameters.
 * Existing disabled entries are cleared first; individual invalid
 * entries are skipped with an error log rather than failing the whole
 * command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the advertised count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

6332 6333 6334 6335 6336 6337 6338 6339 6340 6341
/* MGMT_OP_SET_EXTERNAL_CONFIG: toggle the externally-configured state
 * of a controller with the EXTERNAL_CONFIG quirk, moving it between
 * the configured and unconfigured index lists as needed.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = cp->config ?
		  !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED) :
		  hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0 || !changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The configured state changed relative to the UNCONFIGURED
	 * flag, so the controller has to switch index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6388 6389 6390 6391 6392 6393 6394 6395 6396 6397
/* MGMT_OP_SET_PUBLIC_ADDRESS: store the public address to be
 * programmed into the controller via the driver's set_bdaddr hook,
 * and power the controller on if it becomes fully configured.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver hook for programming the address there is
	 * nothing this command could accomplish.
	 */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0 || !changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450
/* Append one EIR structure (length, type, payload) at offset eir_len
 * and return the new total length. The caller must ensure the buffer
 * has room for data_len + 2 more bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* The length octet covers the type octet plus the payload */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);

	return eir_len + data_len;
}

6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589
/* Completion handler for the HCI request queued by
 * read_local_ssp_oob_req().  Builds the EIR-formatted reply for the
 * pending Read Local OOB Extended Data command and, on success, also
 * broadcasts it as a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with an empty EIR blob. */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 hash/randomizer available. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device field (5 bytes) plus two 16-byte
			 * values with 2-byte EIR headers (18 bytes each).
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 values, plus P-192 unless the
		 * controller runs in Secure Connections Only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure skip EIR construction; eir_len is already 0. */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Let the requesting socket receive future OOB data updates too. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

6590 6591 6592 6593 6594 6595 6596
/* Handle the Read Local OOB Extended Data mgmt command.
 *
 * For BR/EDR with SSP the OOB values have to be fetched from the
 * controller, so the reply is deferred to
 * read_local_oob_ext_data_complete(); for LE the reply can be
 * assembled synchronously here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-compute the worst-case EIR length so the reply buffer can
	 * be allocated before taking hdev->lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* Class of Device field */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data lives in the controller; defer the
			 * reply to the HCI request completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random,
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 while advertising, 0x01 otherwise */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Let this socket receive future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760
/* Return the bitmask of advertising flags this controller can honour. */
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = MGMT_ADV_FLAG_CONNECTABLE | MGMT_ADV_FLAG_DISCOV |
		    MGMT_ADV_FLAG_LIMITED_DISCOV | MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* TX power can only be managed when the controller reports it. */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
		flags |= MGMT_ADV_FLAG_TX_POWER;

	return flags;
}

6761 6762 6763 6764 6765 6766
/* Handle the Read Advertising Features mgmt command.  Replies with the
 * supported flags, data length limits and the list of active instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	bool have_instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Currently only one instance is supported, so just add 1 to the
	 * response length when it is in use.
	 */
	have_instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	rp_len = sizeof(*rp) + (have_instance ? 1 : 0);

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;

	/* Currently only one instance is supported, so simply return the
	 * current instance number.
	 */
	if (have_instance) {
		rp->num_instances = 1;
		rp->instance[0] = 1;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

6820
/* Validate an advertising-data or scan-response TLV blob supplied by
 * userspace.
 *
 * @hdev:        unused (kept for handler signature symmetry)
 * @adv_flags:   MGMT_ADV_FLAG_* bits requested for the instance
 * @data:        the TLV blob to validate
 * @len:         length of @data in bytes
 * @is_adv_data: true for advertising data, false for scan response
 *
 * Returns true when the blob fits within the space left after any
 * kernel-managed fields and every TLV field is well formed, and does
 * not duplicate a field the kernel manages itself.
 *
 * Fix vs. the previous version: the field type byte (data[i + 1]) was
 * read before checking that the field actually fits inside @data, so a
 * truncated field - or a trailing zero-length field - caused an
 * out-of-bounds read.  The bounds check now comes first, and
 * zero-length fields (which carry no type byte at all) are skipped.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;
	int i, cur_len;
	bool flags_managed = false;
	bool tx_power_managed = false;
	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
			   MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* When the kernel manages the Flags field, reserve 3 bytes for it
	 * and forbid userspace from supplying its own.
	 */
	if (is_adv_data && (adv_flags & flags_params)) {
		flags_managed = true;
		max_len -= 3;
	}

	/* Likewise for a kernel-managed TX Power field. */
	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
		tx_power_managed = true;
		max_len -= 3;
	}

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero-length field has no type byte; nothing to check. */
		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.  Checked before touching
		 * data[i + 1] so truncated input cannot be read past the
		 * end of the buffer.
		 */
		if (i + cur_len >= len)
			return false;

		if (flags_managed && data[i + 1] == EIR_FLAGS)
			return false;

		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
			return false;
	}

	return true;
}

/* HCI request callback for Add Advertising.  On failure the (single)
 * advertising instance is torn down again before the pending mgmt
 * command is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_add_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	if (status) {
		/* Enabling failed: drop the instance and notify listeners. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
	}

	if (!cmd)
		goto unlock;

	/* Only instance 1 is currently supported. */
	rp.instance = 0x01;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

6899
/* Called when the advertising-instance expiry timer fires; clears the
 * instance while holding hdev->lock.
 */
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
	/* Mark the timeout as no longer pending. */
	hdev->adv_instance_timeout = 0;

	hci_dev_lock(hdev);
	clear_adv_instance(hdev);
	hci_dev_unlock(hdev);
}

6908 6909 6910 6911 6912 6913
/* Handle the Add Advertising mgmt command.
 *
 * Validates the request, stores the (single) advertising instance on
 * hdev and, when the controller is powered and Set Advertising is not
 * active, runs an HCI request to program and enable it.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);

	/* The current implementation only supports adding one instance and only
	 * a subset of the specified flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (cp->instance != 0x01 || (flags & ~supported_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Serialize against other commands touching advertising state. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the advertising data in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->adv_instance.flags = flags;
	hdev->adv_instance.adv_data_len = cp->adv_data_len;
	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;

	if (cp->adv_data_len)
		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);

	if (cp->scan_rsp_len)
		memcpy(hdev->adv_instance.scan_rsp_data,
		       cp->data + cp->adv_data_len, cp->scan_rsp_len);

	/* Re-arm (or cancel) the expiry timer for the new timeout value. */
	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);

	hdev->adv_instance_timeout = timeout;

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));

	/* Only announce the instance the first time it gets added. */
	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
		advertising_added(sk, hdev, 1);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 0x01;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	update_scan_rsp_data(&req);
	enable_advertising(&req);

	err = hci_req_run(&req, add_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085
/* HCI request callback for Remove Advertising. */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	/* Only instance 1 is currently supported. */
	rp.instance = 1;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Remove Advertising mgmt command.
 *
 * Drops the stored advertising instance and, when the controller is
 * powered and Set Advertising is inactive, runs an HCI request to stop
 * advertising.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* The current implementation only allows modifying instance no 1. A
	 * value of 0 indicates that all instances should be cleared.
	 */
	if (cp->instance > 1)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Serialize against other commands touching advertising state. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instance was ever added. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Stop a pending expiry timer for the instance being removed. */
	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));

	advertising_removed(sk, hdev, 1);

	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 1;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);
	disable_advertising(&req);

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

7127
/* Dispatch table for mgmt commands, indexed by opcode.  Each entry
 * gives the handler, the expected (minimum) parameter size and optional
 * HCI_MGMT_* flags: VAR_LEN (parameters may be longer than the given
 * size), NO_HDEV (command takes no controller index), UNTRUSTED
 * (allowed for unprivileged sockets) and UNCONFIGURED (allowed while
 * the controller is still unconfigured).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};

7216
/* Notify userspace that a new controller index appeared.  Sends the
 * legacy (un)configured index event plus the extended index event that
 * carries controller type and bus information.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed via the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			/* 0x01 = unconfigured controller */
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			/* 0x00 = configured controller */
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* 0x02 = AMP controller */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

7248
/* Notify userspace that a controller index went away.  Any still
 * pending mgmt commands for it are first failed with INVALID_INDEX.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed via the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Fail every pending command on this controller. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			/* 0x01 = unconfigured controller */
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			/* 0x00 = configured controller */
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* 0x02 = AMP controller */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

7283
/* Re-sort every known LE connection parameter entry back onto the
 * pending-connection or pending-report list it belongs to and refresh
 * the background scan, as part of the power-on HCI request.
 *
 * This function requires the caller holds hdev->lock
 */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	__hci_update_background_scan(req);
}

7311
/* HCI request callback for the power-on sequence built by
 * powered_update_hci().  Answers pending Set Powered commands and
 * announces the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

7338
/* Build and run the HCI request that brings a freshly powered
 * controller in line with the current mgmt settings (SSP, LE host
 * support, advertising, link security, class, name, EIR, ...).
 *
 * Returns the result of hci_req_run(); on success powered_complete()
 * finishes the job asynchronously.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections when available) if the host
	 * setting is on but the controller does not have it enabled yet.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the authentication setting with the controller. */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7410

7411 7412 7413
/* Synchronize mgmt state with a controller power change.
 *
 * Returns 0 when the power-on HCI request was queued (the pending
 * commands are then answered by powered_complete()), otherwise the
 * result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* If the request was queued, nothing more to do here. */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a cleared Class of Device if one was previously set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7457

7458
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7459
{
7460
	struct mgmt_pending_cmd *cmd;
7461 7462
	u8 status;

7463
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7464
	if (!cmd)
7465
		return;
7466 7467 7468 7469 7470 7471

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

7472
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7473 7474 7475 7476

	mgmt_pending_remove(cmd);
}

7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487
/* Called when the discoverable period ends: clear the discoverable
 * flags and push the updated state to the controller and to userspace.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Page scan only, inquiry scan (discoverability) off. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

7514 7515
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent: store hint telling userspace whether to persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	/* Link keys only exist for BR/EDR connections */
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
7546 7547 7548 7549

	return MGMT_LTK_UNAUTHENTICATED;
}

7550
/* Emit a New Long Term Key event for a key created during SMP pairing.
 * @persistent: store hint for userspace; overridden to zero for
 * non-identity random addresses since those keys are useless later.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 *
	 * Fix: the copy length must be the encryption key size itself
	 * (key->enc_size), not sizeof(key->enc_size) which is only one
	 * byte and would truncate the key value sent to userspace.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

7599 7600 7601
	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
F
Florian Grandel 已提交
7602
	 * is only mandatory for devices using resolvable random
7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of they system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

7615 7616 7617 7618 7619 7620 7621 7622
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

7623 7624
/* Emit a New CSRK event for a signature resolving key created during
 * pairing.
 * @persistent: store hint; suppressed for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

/* Emit a New Connection Parameter event so userspace can decide whether
 * to store the parameters. Only identity addresses are reported.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters for non-identity (resolvable/non-resolvable random)
	 * addresses are not worth storing.
	 */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

/* Emit a Device Connected event, attaching either the LE advertising
 * data or, for BR/EDR, the remote name and class of device as EIR.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Append the class of device only when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7712 7713 7714
{
	struct sock **sk = data;

7715
	cmd->cmd_complete(cmd, 0);
7716 7717 7718 7719

	*sk = cmd->sk;
	sock_hold(*sk);

7720
	mgmt_pending_remove(cmd);
7721 7722
}

7723
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7724
{
7725
	struct hci_dev *hdev = data;
7726
	struct mgmt_cp_unpair_device *cp = cmd->param;
7727

7728 7729
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

7730
	cmd->cmd_complete(cmd, 0);
7731 7732 7733
	mgmt_pending_remove(cmd);
}

7734 7735
bool mgmt_powering_down(struct hci_dev *hdev)
{
7736
	struct mgmt_pending_cmd *cmd;
7737 7738
	struct mgmt_mode *cp;

7739
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7740 7741 7742 7743 7744 7745 7746 7747 7748 7749
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

7750
/* Emit Device Disconnected and complete any pending Disconnect and
 * Unpair Device commands waiting on this link going down. Also kicks
 * the deferred power-off when this was the last open connection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections userspace knows about, and only for
	 * ACL and LE links.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Skip the socket that issued the Disconnect; it already got a
	 * command response via disconnect_rsp.
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

/* A disconnect attempt failed: complete pending Unpair Device commands
 * and, if the failure matches a pending Disconnect command, finish it
 * with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	/* Ignore failures for a different address than the pending one */
	cp = cmd->param;
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Emit a Connect Failed event; also kicks the deferred power-off when
 * this was the last connection during a power-down.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

/* Ask userspace for a PIN code for a BR/EDR pairing.
 * @secure: whether a 16 digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

/* Complete a pending PIN Code Reply command with the controller's
 * status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Complete a pending PIN Code Negative Reply command with the
 * controller's status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

/* Ask userspace to confirm a passkey during pairing.
 * @confirm_hint: whether a simple yes/no confirmation suffices.
 * Returns the result of queuing the event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

/* Ask userspace to enter a passkey during pairing. Returns the result
 * of queuing the event.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

/* Common completion helper for the user confirm/passkey (negative)
 * reply commands. Returns 0 on success or -ENOENT if no matching
 * command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

/* Notify userspace of the passkey to display during pairing.
 * @entered: number of digits the remote side has typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

/* Emit an Authentication Failed event and complete any pending pairing
 * command for this connection with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Don't send the event to the socket that initiated the pairing;
	 * it gets the status via its command response below.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

/* Handle completion of the HCI authentication enable command triggered
 * by Set Link Security: sync the flag, answer pending commands and
 * broadcast new settings when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail all pending Set Link Security commands */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
8011
{
8012
	struct hci_dev *hdev = req->hdev;
8013 8014
	struct hci_cp_write_eir cp;

8015
	if (!lmp_ext_inq_capable(hdev))
8016
		return;
8017

8018 8019
	memset(hdev->eir, 0, sizeof(hdev->eir));

8020 8021
	memset(&cp, 0, sizeof(cp));

8022
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8023 8024
}

8025
/* Handle completion of the HCI commands triggered by Set SSP: sync the
 * SSP/HS flags with the result, answer pending Set SSP commands and
 * refresh (or clear) the EIR data to match the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enable failed: roll back the flags if they were set */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP implies disabling HS as well; report a
		 * change if either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8079 8080 8081 8082 8083 8084 8085 8086 8087
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

8088 8089
/* Handle completion of a class of device update: broadcast the new
 * class on success, skipping the socket that triggered the change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these commands may have triggered the class update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}

/* Handle completion of a local name change and broadcast the new name,
 * unless the change is part of powering on the controller.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change not requested over mgmt; cache it locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}

/* Return true if the 128-bit @uuid occurs in the @uuids list of
 * @uuid_count entries.
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++)
		if (!memcmp(uuid, uuids[i], 16))
			return true;

	return false;
}

/* Walk the EIR/advertising data in @eir and return true if any UUID
 * field (16, 32 or 128 bit) matches an entry of @uuids. 16 and 32 bit
 * UUIDs are expanded against the Bluetooth base UUID before comparing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field that would overrun the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
8202
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

8214 8215
/* Apply the active service discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

/* Emit a Device Found event for a discovery or passive scan result,
 * combining EIR/advertising data, class of device and scan response.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

/* Emit a Device Found event carrying only the remote name, used when a
 * name request completes during discovery.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* The name goes into the event as a complete-name EIR field */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

/* Emit a Discovering event announcing the start or stop of device
 * discovery.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

/* Completion callback for re-enabling advertising; only logs status. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

8373 8374
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8375 8376 8377 8378
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
8379
	hci_req_run(&req, adv_enable_complete);
8380
}
8381 8382 8383 8384 8385

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
8386
	.hdev_init	= mgmt_init_hdev,
8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397
};

/* Register the management channel; called at module init. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the management channel; called at module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}