/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	9

/* Management commands accepted over HCI_CHANNEL_CONTROL sockets.
 * This list is returned verbatim by Read Management Commands
 * (read_commands()) for trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};

/* Management events that can be delivered to trusted
 * HCI_CHANNEL_CONTROL sockets; returned by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};

/* Subset of commands that untrusted (non-privileged) control sockets
 * may issue; all are read-only queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};

/* Subset of events delivered to untrusted control sockets */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

170 171 172 173 174 175 176 177
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
178
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Translate an HCI status code into its mgmt counterpart; anything
 * outside the conversion table collapses to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	return (hci_status < ARRAY_SIZE(mgmt_status_table)) ?
		mgmt_status_table[hci_status] : MGMT_STATUS_FAILED;
}

243 244
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
245
{
246 247
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
248 249
}

250 251 252 253 254 255 256
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

257 258 259 260 261 262 263
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

264 265 266 267
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268
			       HCI_SOCK_TRUSTED, skip_sk);
269 270
}

271 272
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
273 274 275 276 277 278
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
279
	rp.revision = cpu_to_le16(MGMT_REVISION);
280

281 282
	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
283 284
}

285 286
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
287 288
{
	struct mgmt_rp_read_commands *rp;
289
	u16 num_commands, num_events;
290 291 292 293 294
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

295 296 297 298 299 300 301 302
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

303 304 305 306 307 308
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

309 310
	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);
311

312 313 314 315 316
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);
317

318 319 320 321 322 323 324 325 326 327 328
		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}
329

330 331
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
332 333 334 335 336
	kfree(rp);

	return err;
}

/* Handle Read Controller Index List: reply with the ids of all
 * configured BR/EDR controllers. Controllers still in setup/config,
 * controllers bound to a user channel and raw-only controllers are
 * not reported.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used only
	 * to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, applying the extra
	 * SETUP/CONFIG/USER_CHANNEL and raw-only filters.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length: the second pass may have skipped
	 * entries that were counted in the first one.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

/* Handle Read Unconfigured Controller Index List: reply with the ids
 * of BR/EDR controllers that still have HCI_UNCONFIGURED set. Uses
 * the same two-pass count/fill scheme and filters as
 * read_index_list().
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes with the extra filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Reply length reflects the second-pass count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

/* Handle Read Extended Controller Index List: reply with both BR/EDR
 * and AMP controllers. Entry types: 0x00 configured BR/EDR,
 * 0x01 unconfigured BR/EDR, 0x02 AMP. Calling this once switches the
 * socket over to extended index events exclusively.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries with the usual filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

533 534 535
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 538 539 540 541 542 543 544 545
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

546 547 548 549
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

550
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 553
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

554 555 556 557 558 559 560
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

561 562 563 564
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

565 566
	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
567 568
}

569 570 571 572
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

573 574
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
575 576
}

577 578 579 580
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
581
	u32 options = 0;
582 583 584 585 586 587 588

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
589

590 591 592
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

593
	if (hdev->set_bdaddr)
594 595 596 597
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);
598 599 600

	hci_dev_unlock(hdev);

601 602
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
603 604
}

605 606 607 608 609
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
610
	settings |= MGMT_SETTING_BONDABLE;
611
	settings |= MGMT_SETTING_DEBUG_KEYS;
612 613
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;
614

615
	if (lmp_bredr_capable(hdev)) {
616 617
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 619
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
620 621 622 623 624

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
625

626
		if (lmp_sc_capable(hdev))
627
			settings |= MGMT_SETTING_SECURE_CONN;
628
	}
629

630
	if (lmp_le_capable(hdev)) {
631
		settings |= MGMT_SETTING_LE;
632
		settings |= MGMT_SETTING_ADVERTISING;
633
		settings |= MGMT_SETTING_SECURE_CONN;
634
		settings |= MGMT_SETTING_PRIVACY;
635
		settings |= MGMT_SETTING_STATIC_ADDRESS;
636
	}
637

638 639
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
640 641
		settings |= MGMT_SETTING_CONFIGURATION;

642 643 644 645 646 647 648
	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

649
	if (hdev_is_powered(hdev))
650 651
		settings |= MGMT_SETTING_POWERED;

652
	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 654
		settings |= MGMT_SETTING_CONNECTABLE;

655
	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 657
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

658
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 660
		settings |= MGMT_SETTING_DISCOVERABLE;

661
	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662
		settings |= MGMT_SETTING_BONDABLE;
663

664
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 666
		settings |= MGMT_SETTING_BREDR;

667
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 669
		settings |= MGMT_SETTING_LE;

670
	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 672
		settings |= MGMT_SETTING_LINK_SECURITY;

673
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 675
		settings |= MGMT_SETTING_SSP;

676
	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 678
		settings |= MGMT_SETTING_HS;

679
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 681
		settings |= MGMT_SETTING_ADVERTISING;

682
	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 684
		settings |= MGMT_SETTING_SECURE_CONN;

685
	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 687
		settings |= MGMT_SETTING_DEBUG_KEYS;

688
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 690
		settings |= MGMT_SETTING_PRIVACY;

691 692 693 694 695
	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
696
	 * will never be set. If the address is configured, then if the
697 698 699 700 701 702
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
703
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 706 707 708 709
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

710 711 712
	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR "16-bit Service UUIDs" structure built from
 * hdev->uuids into data, writing at most len bytes. Only UUIDs
 * registered with 16-bit size whose value is a service-class UUID
 * (>= 0x1100) are included; the PnP Information UUID is skipped.
 * If the buffer fills up, the type is downgraded to "incomplete
 * list". Returns the advanced write pointer.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits in bytes 12-13 of the stored
		 * 128-bit little-endian form.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Lazily open the AD structure; the length
			 * byte starts at 1 to cover the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

/* Append an EIR "32-bit Service UUIDs" structure built from
 * hdev->uuids into data, writing at most len bytes. Downgrades to the
 * incomplete-list type when the buffer fills up. Returns the advanced
 * write pointer.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			/* Lazily open the AD structure; length byte
			 * starts at 1 to cover the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

/* Append an EIR "128-bit Service UUIDs" structure built from
 * hdev->uuids into data, writing at most len bytes. Downgrades to the
 * incomplete-list type when the buffer fills up. Returns the advanced
 * write pointer.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			/* Lazily open the AD structure; length byte
			 * starts at 1 to cover the type byte.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

823 824 825 826 827 828 829 830 831 832 833 834
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

/* Look up a pending mgmt command for the control channel, matching
 * an additional user-data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
	return cmd;
}

835 836 837 838 839 840 841 842 843 844 845 846 847 848
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0x01;

	return 0x00;
}

849
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
850
{
851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
873 874
}

875 876 877 878 879 880 881 882 883 884 885 886 887
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, hdev->adv_instance.scan_rsp_data,
	       hdev->adv_instance.scan_rsp_len);

	return hdev->adv_instance.scan_rsp_len;
}

/* Queue an HCI LE Set Scan Response Data command for the given
 * advertising instance onto req, unless LE is disabled or the data is
 * unchanged. Instance 0 uses the default (local name) data; other
 * instances use their stored scan response data. The cached copy in
 * hdev is updated as a side effect.
 */
static void update_scan_rsp_data_for_instance(struct hci_request *req,
					      u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Cache the full zero-padded buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

915 916
static void update_scan_rsp_data(struct hci_request *req)
{
917 918
	update_scan_rsp_data_for_instance(req,
					  get_current_adv_instance(req->hdev));
919 920
}

921 922
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
923
	struct mgmt_pending_cmd *cmd;
924 925 926 927

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
928
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 930 931 932 933 934 935
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
936
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
937
			return LE_AD_LIMITED;
938
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
939 940 941 942 943 944
			return LE_AD_GENERAL;
	}

	return 0;
}

945 946 947
static bool get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
948

949 950 951 952 953 954
	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
955

956
		return cp->val;
957 958
	}

959 960
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
961

962 963 964
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
965

966 967 968 969 970 971 972 973 974
	if (instance > 0x01)
		return 0;

	if (instance == 0x01)
		return hdev->adv_instance.flags;

	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

975 976 977 978
	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
	 * to the "connectable" instance flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
979 980 981
		flags |= MGMT_ADV_FLAG_CONNECTABLE;

	return flags;
982 983
}

984 985 986 987 988 989 990 991 992 993 994 995
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	/* Ignore instance 0 and other unsupported instances */
	if (instance != 0x01)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return hdev->adv_instance.scan_rsp_len;
}

/* Build the advertising data payload for the given instance into ptr.
 *
 * Layout: an optional kernel-managed "Flags" AD field, then the
 * instance's user-supplied advertising data (instance != 0 only),
 * then an optional "Tx Power" field. Returns the number of bytes
 * written.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	u32 instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		/* LE-only devices advertise that BR/EDR is unsupported */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Instance 0 carries no user-supplied advertising data */
	if (instance) {
		memcpy(ptr, hdev->adv_instance.adv_data,
		       hdev->adv_instance.adv_data_len);

		ad_len += hdev->adv_instance.adv_data_len;
		ptr += hdev->adv_instance.adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

/* Queue an LE Set Advertising Data command for @instance onto @req,
 * skipping the HCI round-trip when the data is unchanged.
 */
static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

1081 1082
static void update_adv_data(struct hci_request *req)
{
1083
	update_adv_data_for_instance(req, get_current_adv_instance(req->hdev));
1084 1085
}

1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}

1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

1119
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1120 1121 1122 1123 1124 1125 1126
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

1139
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1140
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1141
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1142 1143
}

1144
static void update_eir(struct hci_request *req)
1145
{
1146
	struct hci_dev *hdev = req->hdev;
1147 1148
	struct hci_cp_write_eir cp;

1149
	if (!hdev_is_powered(hdev))
1150
		return;
1151

1152
	if (!lmp_ext_inq_capable(hdev))
1153
		return;
1154

1155
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1156
		return;
1157

1158
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1159
		return;
1160 1161 1162 1163 1164 1165

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1166
		return;
1167 1168 1169

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

1170
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183
}

/* OR together the service-class hints of all registered UUIDs; used as
 * the service-class octet of the Class of Device.
 */
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

1184
static void update_class(struct hci_request *req)
1185
{
1186
	struct hci_dev *hdev = req->hdev;
1187 1188 1189 1190
	u8 cod[3];

	BT_DBG("%s", hdev->name);

1191
	if (!hdev_is_powered(hdev))
1192
		return;
1193

1194
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1195 1196
		return;

1197
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1198
		return;
1199 1200 1201 1202 1203

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

1204
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1205 1206
		cod[1] |= 0x20;

1207
	if (memcmp(cod, hdev->dev_class, 3) == 0)
1208
		return;
1209

1210
	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1211 1212
}

1213 1214 1215 1216 1217 1218 1219
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1220 1221 1222 1223
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
1224
	u8 own_addr_type, enable = 0x01;
1225
	bool connectable;
1226 1227
	u8 instance;
	u32 flags;
1228

1229 1230 1231
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

1232
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1233 1234
		disable_advertising(req);

1235
	/* Clear the HCI_LE_ADV bit temporarily so that the
1236 1237 1238 1239
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
1240
	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1241

1242 1243
	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);
1244 1245 1246 1247 1248 1249

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);
1250

1251 1252 1253 1254 1255
	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1256 1257
		return;

1258
	memset(&cp, 0, sizeof(cp));
1259 1260
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1261 1262 1263 1264 1265 1266 1267 1268

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

1269
	cp.own_address_type = own_addr_type;
1270 1271 1272 1273 1274 1275 1276
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1277 1278 1279
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
1280
					    service_cache.work);
1281
	struct hci_request req;
1282

1283
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1284 1285
		return;

1286 1287
	hci_req_init(&req, hdev);

1288 1289
	hci_dev_lock(hdev);

1290 1291
	update_eir(&req);
	update_class(&req);
1292 1293

	hci_dev_unlock(hdev);
1294 1295

	hci_req_run(&req, NULL);
1296 1297
}

1298 1299 1300 1301 1302 1303 1304 1305
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

1306
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1307

1308
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

1319
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1320
{
1321
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1322 1323
		return;

1324
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1325
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1326

1327 1328 1329 1330 1331
	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
1332
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1333 1334
}

1335
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1336
				void *data, u16 data_len)
1337
{
1338
	struct mgmt_rp_read_info rp;
1339

1340
	BT_DBG("sock %p %s", sk, hdev->name);
1341

1342
	hci_dev_lock(hdev);
1343

1344 1345
	memset(&rp, 0, sizeof(rp));

1346
	bacpy(&rp.bdaddr, &hdev->bdaddr);
1347

1348
	rp.version = hdev->hci_ver;
1349
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1350 1351 1352

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1353

1354
	memcpy(rp.dev_class, hdev->dev_class, 3);
1355

1356
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1357
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1358

1359
	hci_dev_unlock(hdev);
1360

1361 1362
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
1363 1364
}

1365
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1366
{
1367
	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1368

1369 1370
	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
1371 1372
}

1373
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1374 1375 1376
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

1377 1378
	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
1379
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1380
	}
1381 1382
}

1383
static bool hci_stop_discovery(struct hci_request *req)
1384 1385 1386 1387 1388 1389 1390
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
1391
		if (test_bit(HCI_INQUIRY, &hdev->flags))
1392
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1393 1394

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1395 1396 1397 1398
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

1399
		return true;
1400 1401 1402 1403 1404

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
1405
			break;
1406 1407 1408 1409 1410

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

1411
		return true;
1412 1413 1414

	default:
		/* Passive scanning */
1415
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1416
			hci_req_add_le_scan_disable(req);
1417 1418 1419
			return true;
		}

1420 1421
		break;
	}
1422 1423

	return false;
1424 1425
}

1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

/* Notify mgmt sockets (except @sk) that advertising @instance was removed. */
static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

1453 1454
	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);
1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}

1469 1470 1471 1472
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
1473 1474
	bool discov_stopped;
	int err;
1475 1476 1477 1478 1479 1480 1481 1482 1483

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

1484
	if (hdev->adv_instance_timeout)
1485 1486
		clear_adv_instance(hdev);

1487
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1488 1489
		disable_advertising(&req);

1490
	discov_stopped = hci_stop_discovery(&req);
1491 1492 1493

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
1522 1523
	}

1524 1525 1526 1527 1528
	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
1529 1530
}

1531
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1532
		       u16 len)
1533
{
1534
	struct mgmt_mode *cp = data;
1535
	struct mgmt_pending_cmd *cmd;
1536
	int err;
1537

1538
	BT_DBG("request for %s", hdev->name);
1539

1540
	if (cp->val != 0x00 && cp->val != 0x01)
1541 1542
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);
1543

1544
	hci_dev_lock(hdev);
1545

1546
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1547 1548
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
1549 1550 1551
		goto failed;
	}

1552
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1553 1554 1555
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
1556 1557 1558
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
1559 1560 1561 1562
			goto failed;
		}
	}

1563
	if (!!cp->val == hdev_is_powered(hdev)) {
1564
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1565 1566 1567
		goto failed;
	}

1568
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1569 1570
	if (!cmd) {
		err = -ENOMEM;
1571
		goto failed;
1572
	}
1573

1574
	if (cp->val) {
1575
		queue_work(hdev->req_workqueue, &hdev->power_on);
1576 1577 1578 1579
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
1580 1581 1582
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);
1583

1584 1585
		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
1586
			cancel_delayed_work(&hdev->power_off);
1587 1588 1589 1590
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}
1591 1592

failed:
1593
	hci_dev_unlock(hdev);
1594
	return err;
1595 1596
}

1597 1598
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
1599
	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1600

1601 1602
	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
1603 1604
}

1605 1606 1607 1608 1609
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1610 1611 1612 1613 1614 1615
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

1616
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

1632
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1633 1634 1635
{
	u8 *status = data;

1636
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1637 1638 1639
	mgmt_pending_remove(cmd);
}

1640
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1654
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1655
{
1656 1657
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
1658 1659
}

1660
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1661
{
1662 1663
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
1664 1665
}

1666 1667 1668 1669
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1670
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1671 1672 1673 1674 1675 1676 1677 1678 1679
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1680
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1681 1682 1683 1684 1685
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1686 1687
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
1688
{
1689
	struct mgmt_pending_cmd *cmd;
1690
	struct mgmt_mode *cp;
1691
	struct hci_request req;
1692 1693 1694 1695 1696 1697
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

1698
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1699 1700 1701 1702 1703
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
1704
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1706 1707 1708 1709
		goto remove_cmd;
	}

	cp = cmd->param;
1710
	if (cp->val) {
1711
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1712 1713 1714 1715 1716 1717 1718

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
1719
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1720
	}
1721 1722 1723 1724 1725 1726

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

1727 1728
	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
1729 1730
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
1731 1732
	 */
	hci_req_init(&req, hdev);
1733
	__hci_update_page_scan(&req);
1734 1735 1736
	update_class(&req);
	hci_req_run(&req, NULL);

1737 1738 1739 1740 1741 1742 1743
remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1744
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1745
			    u16 len)
1746
{
1747
	struct mgmt_cp_set_discoverable *cp = data;
1748
	struct mgmt_pending_cmd *cmd;
1749
	struct hci_request req;
1750
	u16 timeout;
1751
	u8 scan;
1752 1753
	int err;

1754
	BT_DBG("request for %s", hdev->name);
1755

1756 1757
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1758 1759
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);
1760

1761
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1762 1763
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1764

1765
	timeout = __le16_to_cpu(cp->timeout);
1766 1767 1768 1769 1770 1771

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
1772 1773
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
1774

1775
	hci_dev_lock(hdev);
1776

1777
	if (!hdev_is_powered(hdev) && timeout > 0) {
1778 1779
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
1780 1781 1782
		goto failed;
	}

1783 1784
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1785 1786
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
1787 1788 1789
		goto failed;
	}

1790
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1791 1792
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
1793 1794 1795 1796
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
1797 1798
		bool changed = false;

1799 1800 1801 1802
		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
1803
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1804
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1805 1806 1807
			changed = true;
		}

1808
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1809 1810 1811 1812 1813 1814
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

1815 1816 1817
		goto failed;
	}

1818 1819 1820 1821
	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
1822 1823 1824
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
1825 1826
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;
1827

1828 1829
		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1830
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1831
					   to);
1832 1833
		}

1834
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1835 1836 1837
		goto failed;
	}

1838
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1839 1840
	if (!cmd) {
		err = -ENOMEM;
1841
		goto failed;
1842
	}
1843

1844 1845 1846 1847 1848 1849 1850
	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

1851 1852
	/* Limited discoverable mode */
	if (cp->val == 0x02)
1853
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1854
	else
1855
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1856

1857 1858
	hci_req_init(&req, hdev);

1859 1860 1861
	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
1862
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1863 1864
		goto update_ad;

1865 1866
	scan = SCAN_PAGE;

1867 1868 1869 1870 1871
	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
1872
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

1890
		scan |= SCAN_INQUIRY;
1891
	} else {
1892
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1893
	}
1894

1895
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1896

1897 1898 1899
update_ad:
	update_adv_data(&req);

1900
	err = hci_req_run(&req, set_discoverable_complete);
1901
	if (err < 0)
1902
		mgmt_pending_remove(cmd);
1903 1904

failed:
1905
	hci_dev_unlock(hdev);
1906 1907 1908
	return err;
}

1909 1910
static void write_fast_connectable(struct hci_request *req, bool enable)
{
1911
	struct hci_dev *hdev = req->hdev;
1912 1913 1914
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

1915
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1916 1917
		return;

1918 1919 1920
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

1921 1922 1923 1924
	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
1925
		acp.interval = cpu_to_le16(0x0100);
1926 1927 1928 1929
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
1930
		acp.interval = cpu_to_le16(0x0800);
1931 1932
	}

1933
	acp.window = cpu_to_le16(0x0012);
1934

1935 1936 1937 1938 1939 1940 1941
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1942 1943
}

1944 1945
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
1946
{
1947
	struct mgmt_pending_cmd *cmd;
1948
	struct mgmt_mode *cp;
1949
	bool conn_changed, discov_changed;
1950 1951 1952 1953 1954

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

1955
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1956 1957 1958
	if (!cmd)
		goto unlock;

1959 1960
	if (status) {
		u8 mgmt_err = mgmt_status(status);
1961
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1962 1963 1964
		goto remove_cmd;
	}

1965
	cp = cmd->param;
1966
	if (cp->val) {
1967 1968
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
1969 1970
		discov_changed = false;
	} else {
1971 1972 1973 1974
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
1975
	}
1976

1977 1978
	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

1979
	if (conn_changed || discov_changed) {
1980
		new_settings(hdev, cmd->sk);
1981
		hci_update_page_scan(hdev);
1982 1983
		if (discov_changed)
			mgmt_update_adv_data(hdev);
1984 1985
		hci_update_background_scan(hdev);
	}
1986

1987
remove_cmd:
1988 1989 1990 1991 1992 1993
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1994 1995 1996 1997 1998 1999
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

2000
	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2001 2002 2003
		changed = true;

	if (val) {
2004
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2005
	} else {
2006 2007
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2008 2009 2010 2011 2012 2013
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

2014
	if (changed) {
2015
		hci_update_page_scan(hdev);
2016
		hci_update_background_scan(hdev);
2017
		return new_settings(hdev, sk);
2018
	}
2019 2020 2021 2022

	return 0;
}

2023
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2024
			   u16 len)
2025
{
2026
	struct mgmt_mode *cp = data;
2027
	struct mgmt_pending_cmd *cmd;
2028
	struct hci_request req;
2029
	u8 scan;
2030 2031
	int err;

2032
	BT_DBG("request for %s", hdev->name);
2033

2034 2035
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2036 2037
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);
2038

2039
	if (cp->val != 0x00 && cp->val != 0x01)
2040 2041
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
2042

2043
	hci_dev_lock(hdev);
2044

2045
	if (!hdev_is_powered(hdev)) {
2046
		err = set_connectable_update_settings(hdev, sk, cp->val);
2047 2048 2049
		goto failed;
	}

2050 2051
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2052 2053
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
2054 2055 2056
		goto failed;
	}

2057
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2058 2059
	if (!cmd) {
		err = -ENOMEM;
2060
		goto failed;
2061
	}
2062

2063
	hci_req_init(&req, hdev);
2064

2065 2066 2067 2068
	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
2069
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2070
		if (!cp->val) {
2071 2072
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2073 2074 2075
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2076 2077 2078
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;
2091 2092

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
2093
			    hdev->discov_timeout > 0)
2094 2095
				cancel_delayed_work(&hdev->discov_off);
		}
2096

2097 2098
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
2099

2100
no_scan_update:
2101
	/* Update the advertising parameters if necessary */
2102 2103
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2104 2105
		enable_advertising(&req);

2106
	err = hci_req_run(&req, set_connectable_complete);
2107
	if (err < 0) {
2108
		mgmt_pending_remove(cmd);
2109
		if (err == -ENODATA)
2110 2111
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
2112 2113
		goto failed;
	}
2114 2115

failed:
2116
	hci_dev_unlock(hdev);
2117 2118 2119
	return err;
}

2120
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2121
			u16 len)
2122
{
2123
	struct mgmt_mode *cp = data;
2124
	bool changed;
2125 2126
	int err;

2127
	BT_DBG("request for %s", hdev->name);
2128

2129
	if (cp->val != 0x00 && cp->val != 0x01)
2130 2131
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);
2132

2133
	hci_dev_lock(hdev);
2134 2135

	if (cp->val)
2136
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2137
	else
2138
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2139

2140
	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2141
	if (err < 0)
2142
		goto unlock;
2143

2144 2145
	if (changed)
		err = new_settings(hdev, sk);
2146

2147
unlock:
2148
	hci_dev_unlock(hdev);
2149 2150 2151
	return err;
}

2152 2153
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
2154 2155
{
	struct mgmt_mode *cp = data;
2156
	struct mgmt_pending_cmd *cmd;
2157
	u8 val, status;
2158 2159
	int err;

2160
	BT_DBG("request for %s", hdev->name);
2161

2162 2163
	status = mgmt_bredr_support(hdev);
	if (status)
2164 2165
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);
2166

2167
	if (cp->val != 0x00 && cp->val != 0x01)
2168 2169
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);
2170

2171 2172
	hci_dev_lock(hdev);

2173
	if (!hdev_is_powered(hdev)) {
2174 2175
		bool changed = false;

2176
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2177
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2178 2179 2180 2181 2182 2183 2184 2185 2186 2187
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

2188 2189 2190
		goto failed;
	}

2191
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2192 2193
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2221
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2222 2223
{
	struct mgmt_mode *cp = data;
2224
	struct mgmt_pending_cmd *cmd;
2225
	u8 status;
2226 2227
	int err;

2228
	BT_DBG("request for %s", hdev->name);
2229

2230 2231
	status = mgmt_bredr_support(hdev);
	if (status)
2232
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2233

2234
	if (!lmp_ssp_capable(hdev))
2235 2236
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);
2237

2238
	if (cp->val != 0x00 && cp->val != 0x01)
2239 2240
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);
2241

2242
	hci_dev_lock(hdev);
2243

2244
	if (!hdev_is_powered(hdev)) {
2245
		bool changed;
2246

2247
		if (cp->val) {
2248 2249
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
2250
		} else {
2251 2252
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
2253
			if (!changed)
2254 2255
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
2256
			else
2257
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2258 2259 2260 2261 2262 2263 2264 2265 2266
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

2267 2268 2269
		goto failed;
	}

2270
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2271 2272
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
2273 2274 2275
		goto failed;
	}

2276
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2277 2278 2279 2280 2281 2282 2283 2284 2285 2286
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

2287
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2288 2289 2290
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

2291
	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2292 2293 2294 2295 2296 2297 2298 2299 2300 2301
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2302
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2303 2304
{
	struct mgmt_mode *cp = data;
2305
	bool changed;
2306
	u8 status;
2307
	int err;
2308

2309
	BT_DBG("request for %s", hdev->name);
2310

2311 2312
	status = mgmt_bredr_support(hdev);
	if (status)
2313
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2314

2315
	if (!lmp_ssp_capable(hdev))
2316 2317
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
2318

2319
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2320 2321
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);
2322

2323
	if (cp->val != 0x00 && cp->val != 0x01)
2324 2325
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);
2326

2327 2328
	hci_dev_lock(hdev);

2329
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2330 2331
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
2332 2333 2334
		goto unlock;
	}

2335
	if (cp->val) {
2336
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2337 2338
	} else {
		if (hdev_is_powered(hdev)) {
2339 2340
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
2341 2342 2343
			goto unlock;
		}

2344
		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2345
	}
2346 2347 2348 2349 2350 2351 2352

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);
2353

2354 2355 2356
unlock:
	hci_dev_unlock(hdev);
	return err;
2357 2358
}

2359
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2360 2361 2362
{
	struct cmd_lookup match = { NULL, hdev };

2363 2364
	hci_dev_lock(hdev);

2365 2366 2367 2368 2369
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
2370
		goto unlock;
2371 2372 2373 2374 2375 2376 2377 2378
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
2379 2380 2381 2382 2383 2384

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
2385
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2386 2387 2388
		struct hci_request req;

		hci_req_init(&req, hdev);
2389
		update_adv_data(&req);
2390
		update_scan_rsp_data(&req);
2391
		__hci_update_background_scan(&req);
2392 2393
		hci_req_run(&req, NULL);
	}
2394 2395 2396

unlock:
	hci_dev_unlock(hdev);
2397 2398
}

2399
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2400 2401 2402
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
2403
	struct mgmt_pending_cmd *cmd;
2404
	struct hci_request req;
2405
	int err;
2406
	u8 val, enabled;
2407

2408
	BT_DBG("request for %s", hdev->name);
2409

2410
	if (!lmp_le_capable(hdev))
2411 2412
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);
2413

2414
	if (cp->val != 0x00 && cp->val != 0x01)
2415 2416
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);
2417

2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430
	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

2431 2432
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
2433
	}
2434

2435
	hci_dev_lock(hdev);
2436 2437

	val = !!cp->val;
2438
	enabled = lmp_host_le_capable(hdev);
2439

2440
	if (!hdev_is_powered(hdev) || val == enabled) {
2441 2442
		bool changed = false;

2443
		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2444
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2445 2446 2447
			changed = true;
		}

2448
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2449
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2450 2451 2452
			changed = true;
		}

2453 2454
		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
2455
			goto unlock;
2456 2457 2458 2459

		if (changed)
			err = new_settings(hdev, sk);

2460
		goto unlock;
2461 2462
	}

2463 2464
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2465 2466
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
2467
		goto unlock;
2468 2469 2470 2471 2472
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
2473
		goto unlock;
2474 2475
	}

2476 2477
	hci_req_init(&req, hdev);

2478 2479 2480 2481
	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
2482
		hci_cp.simul = 0x00;
2483
	} else {
2484
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2485
			disable_advertising(&req);
2486 2487
	}

2488 2489 2490 2491
	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
2492
	if (err < 0)
2493 2494
		mgmt_pending_remove(cmd);

2495 2496
unlock:
	hci_dev_unlock(hdev);
2497 2498 2499
	return err;
}

2500 2501 2502 2503 2504 2505 2506 2507
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
2508
	struct mgmt_pending_cmd *cmd;
2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

/* The 96-bit tail of the Bluetooth Base UUID, little-endian. UUIDs
 * sharing this tail are SIG-allocated 16- or 32-bit UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the effective size in bits (16, 32 or 128) of a 128-bit
 * little-endian UUID.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	/* Not derived from the base UUID: full 128-bit UUID */
	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	/* The distinguishing value sits in the top four bytes */
	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

2542 2543
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
2544
	struct mgmt_pending_cmd *cmd;
2545 2546 2547

	hci_dev_lock(hdev);

2548
	cmd = pending_find(mgmt_op, hdev);
2549 2550 2551
	if (!cmd)
		goto unlock;

2552 2553
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);
2554 2555 2556 2557 2558 2559 2560

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

2561
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2562 2563 2564 2565 2566 2567
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

2568
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2569
{
2570
	struct mgmt_cp_add_uuid *cp = data;
2571
	struct mgmt_pending_cmd *cmd;
2572
	struct hci_request req;
2573 2574 2575
	struct bt_uuid *uuid;
	int err;

2576
	BT_DBG("request for %s", hdev->name);
2577

2578
	hci_dev_lock(hdev);
2579

2580
	if (pending_eir_or_class(hdev)) {
2581 2582
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
2583 2584 2585
		goto failed;
	}

2586
	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2587 2588 2589 2590 2591 2592
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
2593
	uuid->svc_hint = cp->svc_hint;
2594
	uuid->size = get_uuid_size(cp->uuid);
2595

2596
	list_add_tail(&uuid->list, &hdev->uuids);
2597

2598
	hci_req_init(&req, hdev);
2599

2600 2601 2602
	update_class(&req);
	update_eir(&req);

2603 2604 2605 2606
	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;
2607

2608 2609
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
2610 2611 2612 2613
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2614
	if (!cmd) {
2615
		err = -ENOMEM;
2616 2617 2618 2619
		goto failed;
	}

	err = 0;
2620 2621

failed:
2622
	hci_dev_unlock(hdev);
2623 2624 2625
	return err;
}

2626 2627 2628 2629 2630
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2631
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2632 2633
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2634 2635 2636 2637 2638 2639
		return true;
	}

	return false;
}

2640
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2641 2642 2643 2644 2645 2646
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2647
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2648
		       u16 len)
2649
{
2650
	struct mgmt_cp_remove_uuid *cp = data;
2651
	struct mgmt_pending_cmd *cmd;
2652
	struct bt_uuid *match, *tmp;
2653
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2654
	struct hci_request req;
2655 2656
	int err, found;

2657
	BT_DBG("request for %s", hdev->name);
2658

2659
	hci_dev_lock(hdev);
2660

2661
	if (pending_eir_or_class(hdev)) {
2662 2663
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
2664 2665 2666
		goto unlock;
	}

2667
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2668
		hci_uuids_clear(hdev);
2669

2670
		if (enable_service_cache(hdev)) {
2671 2672 2673
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
2674 2675
			goto unlock;
		}
2676

2677
		goto update_class;
2678 2679 2680 2681
	}

	found = 0;

2682
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2683 2684 2685 2686
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
2687
		kfree(match);
2688 2689 2690 2691
		found++;
	}

	if (found == 0) {
2692 2693
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
2694 2695 2696
		goto unlock;
	}

2697
update_class:
2698
	hci_req_init(&req, hdev);
2699

2700 2701 2702
	update_class(&req);
	update_eir(&req);

2703 2704 2705 2706
	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2707

2708 2709
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
2710 2711 2712 2713
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2714
	if (!cmd) {
2715
		err = -ENOMEM;
2716 2717 2718 2719
		goto unlock;
	}

	err = 0;
2720 2721

unlock:
2722
	hci_dev_unlock(hdev);
2723 2724 2725
	return err;
}

2726
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2727 2728 2729 2730 2731 2732
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2733
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2734
			 u16 len)
2735
{
2736
	struct mgmt_cp_set_dev_class *cp = data;
2737
	struct mgmt_pending_cmd *cmd;
2738
	struct hci_request req;
2739 2740
	int err;

2741
	BT_DBG("request for %s", hdev->name);
2742

2743
	if (!lmp_bredr_capable(hdev))
2744 2745
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);
2746

2747
	hci_dev_lock(hdev);
2748

2749
	if (pending_eir_or_class(hdev)) {
2750 2751
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
2752 2753
		goto unlock;
	}
2754

2755
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2756 2757
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
2758 2759
		goto unlock;
	}
2760

2761 2762 2763
	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

2764
	if (!hdev_is_powered(hdev)) {
2765 2766
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
2767 2768 2769
		goto unlock;
	}

2770 2771
	hci_req_init(&req, hdev);

2772
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2773 2774 2775
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
2776
		update_eir(&req);
2777
	}
2778

2779 2780
	update_class(&req);

2781 2782 2783 2784
	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2785

2786 2787
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
2788 2789 2790 2791
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2792
	if (!cmd) {
2793
		err = -ENOMEM;
2794 2795 2796 2797
		goto unlock;
	}

	err = 0;
2798

2799
unlock:
2800
	hci_dev_unlock(hdev);
2801 2802 2803
	return err;
}

2804
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2805
			  u16 len)
2806
{
2807
	struct mgmt_cp_load_link_keys *cp = data;
2808 2809
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
2810
	u16 key_count, expected_len;
2811
	bool changed;
2812
	int i;
2813

2814 2815 2816
	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
2817 2818
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
2819

2820
	key_count = __le16_to_cpu(cp->key_count);
2821 2822 2823
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
2824 2825
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2826
	}
2827

2828 2829
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
2830
	if (expected_len != len) {
2831
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2832
		       expected_len, len);
2833 2834
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2835 2836
	}

2837
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2838 2839
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
2840

2841
	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2842
	       key_count);
2843

2844 2845 2846
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

2847
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2848 2849 2850
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
2851 2852
	}

2853
	hci_dev_lock(hdev);
2854 2855 2856 2857

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
2858
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2859
	else
2860 2861
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
2862 2863 2864

	if (changed)
		new_settings(hdev, NULL);
2865

2866
	for (i = 0; i < key_count; i++) {
2867
		struct mgmt_link_key_info *key = &cp->keys[i];
2868

2869 2870 2871 2872 2873 2874
		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

2875 2876
		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
2877 2878
	}

2879
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2880

2881
	hci_dev_unlock(hdev);
2882

2883
	return 0;
2884 2885
}

2886
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2887
			   u8 addr_type, struct sock *skip_sk)
2888 2889 2890 2891 2892 2893 2894
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2895
			  skip_sk);
2896 2897
}

2898
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2899
			 u16 len)
2900
{
2901 2902
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
2903
	struct hci_cp_disconnect dc;
2904
	struct mgmt_pending_cmd *cmd;
2905 2906 2907
	struct hci_conn *conn;
	int err;

2908
	memset(&rp, 0, sizeof(rp));
2909 2910
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
2911

2912
	if (!bdaddr_type_is_valid(cp->addr.type))
2913 2914 2915
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2916

2917
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2918 2919 2920
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
2921

2922 2923
	hci_dev_lock(hdev);

2924
	if (!hdev_is_powered(hdev)) {
2925 2926 2927
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
2928 2929 2930
		goto unlock;
	}

2931
	if (cp->addr.type == BDADDR_BREDR) {
2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

2945
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2946 2947 2948
	} else {
		u8 addr_type;

2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

2966 2967 2968 2969 2970
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

2971 2972
		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

2973 2974
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}
2975

2976
	if (err < 0) {
2977 2978 2979
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
2980 2981 2982
		goto unlock;
	}

2983 2984 2985
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
2986
	if (!conn) {
2987 2988
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
2989
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2990 2991
		goto unlock;
	}
2992

2993
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2994
			       sizeof(*cp));
2995 2996 2997
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
2998 2999
	}

3000 3001
	cmd->cmd_complete = addr_cmd_complete;

3002
	dc.handle = cpu_to_le16(conn->handle);
3003 3004 3005 3006 3007
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

3008
unlock:
3009
	hci_dev_unlock(hdev);
3010 3011 3012
	return err;
}

3013
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3014
		      u16 len)
3015
{
3016
	struct mgmt_cp_disconnect *cp = data;
3017
	struct mgmt_rp_disconnect rp;
3018
	struct mgmt_pending_cmd *cmd;
3019 3020 3021 3022 3023
	struct hci_conn *conn;
	int err;

	BT_DBG("");

3024 3025 3026 3027
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

3028
	if (!bdaddr_type_is_valid(cp->addr.type))
3029 3030 3031
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3032

3033
	hci_dev_lock(hdev);
3034 3035

	if (!test_bit(HCI_UP, &hdev->flags)) {
3036 3037 3038
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
3039 3040 3041
		goto failed;
	}

3042
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3043 3044
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3045 3046 3047
		goto failed;
	}

3048
	if (cp->addr.type == BDADDR_BREDR)
3049 3050
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
3051 3052
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3053

3054
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3055 3056 3057
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
3058 3059 3060
		goto failed;
	}

3061
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3062 3063
	if (!cmd) {
		err = -ENOMEM;
3064
		goto failed;
3065
	}
3066

3067 3068
	cmd->cmd_complete = generic_cmd_complete;

3069
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3070
	if (err < 0)
3071
		mgmt_pending_remove(cmd);
3072 3073

failed:
3074
	hci_dev_unlock(hdev);
3075 3076 3077
	return err;
}

3078
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3079 3080 3081
{
	switch (link_type) {
	case LE_LINK:
3082 3083
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
3084
			return BDADDR_LE_PUBLIC;
3085

3086
		default:
3087
			/* Fallback to LE Random address type */
3088
			return BDADDR_LE_RANDOM;
3089
		}
3090

3091
	default:
3092
		/* Fallback to BR/EDR type */
3093
		return BDADDR_BREDR;
3094 3095 3096
	}
}

3097 3098
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
3099 3100
{
	struct mgmt_rp_get_connections *rp;
3101
	struct hci_conn *c;
3102
	size_t rp_len;
3103 3104
	int err;
	u16 i;
3105 3106 3107

	BT_DBG("");

3108
	hci_dev_lock(hdev);
3109

3110
	if (!hdev_is_powered(hdev)) {
3111 3112
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
3113 3114 3115
		goto unlock;
	}

3116
	i = 0;
3117 3118
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3119
			i++;
3120 3121
	}

3122
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3123
	rp = kmalloc(rp_len, GFP_KERNEL);
3124
	if (!rp) {
3125 3126 3127 3128 3129
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
3130
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3131 3132
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
3133
		bacpy(&rp->addr[i].bdaddr, &c->dst);
3134
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3135
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3136 3137 3138 3139
			continue;
		i++;
	}

3140
	rp->conn_count = cpu_to_le16(i);
3141

3142 3143
	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3144

3145 3146
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);
3147

3148
	kfree(rp);
3149 3150

unlock:
3151
	hci_dev_unlock(hdev);
3152 3153 3154
	return err;
}

3155
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3156
				   struct mgmt_cp_pin_code_neg_reply *cp)
3157
{
3158
	struct mgmt_pending_cmd *cmd;
3159 3160
	int err;

3161
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3162
			       sizeof(*cp));
3163 3164 3165
	if (!cmd)
		return -ENOMEM;

3166
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3167
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3168 3169 3170 3171 3172 3173
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

3174
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3175
			  u16 len)
3176
{
3177
	struct hci_conn *conn;
3178
	struct mgmt_cp_pin_code_reply *cp = data;
3179
	struct hci_cp_pin_code_reply reply;
3180
	struct mgmt_pending_cmd *cmd;
3181 3182 3183 3184
	int err;

	BT_DBG("");

3185
	hci_dev_lock(hdev);
3186

3187
	if (!hdev_is_powered(hdev)) {
3188 3189
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
3190 3191 3192
		goto failed;
	}

3193
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3194
	if (!conn) {
3195 3196
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
3197 3198 3199 3200
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3201 3202 3203
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3204 3205 3206

		BT_ERR("PIN code is not 16 bytes long");

3207
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3208
		if (err >= 0)
3209 3210
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);
3211 3212 3213 3214

		goto failed;
	}

3215
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3216 3217
	if (!cmd) {
		err = -ENOMEM;
3218
		goto failed;
3219
	}
3220

3221 3222
	cmd->cmd_complete = addr_cmd_complete;

3223
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3224
	reply.pin_len = cp->pin_len;
3225
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3226 3227 3228

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
3229
		mgmt_pending_remove(cmd);
3230 3231

failed:
3232
	hci_dev_unlock(hdev);
3233 3234 3235
	return err;
}

3236 3237
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
3238
{
3239
	struct mgmt_cp_set_io_capability *cp = data;
3240 3241 3242

	BT_DBG("");

3243
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3244 3245
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3246

3247
	hci_dev_lock(hdev);
3248 3249 3250 3251

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3252
	       hdev->io_capability);
3253

3254
	hci_dev_unlock(hdev);
3255

3256 3257
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
3258 3259
}

3260
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3261 3262
{
	struct hci_dev *hdev = conn->hdev;
3263
	struct mgmt_pending_cmd *cmd;
3264

3265
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

3278
/* Complete a Pair Device command: send the response to userspace, detach
 * the pairing callbacks from the connection and drop the references taken
 * when the pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

3307 3308 3309
/* Called by the SMP layer when pairing finishes; completes any pending
 * Pair Device command for this connection with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

3319 3320
/* BR/EDR connection callback: any connect/security/disconnect event
 * terminates the pending pairing with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

3335
/* LE connection callback: only failures terminate the pairing here; a
 * successful event is not proof that pairing finished (SMP signals that
 * separately via mgmt_smp_complete()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

3354
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3355
		       u16 len)
3356
{
3357
	struct mgmt_cp_pair_device *cp = data;
3358
	struct mgmt_rp_pair_device rp;
3359
	struct mgmt_pending_cmd *cmd;
3360 3361 3362 3363 3364 3365
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

3366 3367 3368 3369
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

3370
	if (!bdaddr_type_is_valid(cp->addr.type))
3371 3372 3373
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3374

3375
	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3376 3377 3378
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3379

3380
	hci_dev_lock(hdev);
3381

3382
	if (!hdev_is_powered(hdev)) {
3383 3384 3385
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
3386 3387 3388
		goto unlock;
	}

3389 3390 3391 3392 3393 3394 3395
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

3396
	sec_level = BT_SECURITY_MEDIUM;
3397
	auth_type = HCI_AT_DEDICATED_BONDING;
3398

3399
	if (cp->addr.type == BDADDR_BREDR) {
3400 3401
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
3402 3403 3404 3405 3406 3407 3408 3409 3410 3411
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422
		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

3423
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3424 3425
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
3426
	}
3427

3428
	if (IS_ERR(conn)) {
3429 3430 3431 3432
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
3433 3434 3435 3436
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
3437 3438 3439
		else
			status = MGMT_STATUS_CONNECT_FAILED;

3440 3441
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
3442 3443 3444 3445
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
3446
		hci_conn_drop(conn);
3447 3448
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3449 3450 3451
		goto unlock;
	}

3452
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3453 3454
	if (!cmd) {
		err = -ENOMEM;
3455
		hci_conn_drop(conn);
3456 3457 3458
		goto unlock;
	}

3459 3460
	cmd->cmd_complete = pairing_complete;

3461
	/* For LE, just connecting isn't a proof that the pairing finished */
3462
	if (cp->addr.type == BDADDR_BREDR) {
3463
		conn->connect_cfm_cb = pairing_complete_cb;
3464 3465 3466 3467 3468 3469 3470
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}
3471

3472
	conn->io_capability = cp->io_cap;
3473
	cmd->user_data = hci_conn_get(conn);
3474

3475
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3476 3477 3478 3479
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}
3480 3481 3482 3483

	err = 0;

unlock:
3484
	hci_dev_unlock(hdev);
3485 3486 3487
	return err;
}

3488 3489
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair Device
 * command for the given address, completing it with CANCELLED status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing was for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

3531
/* Common helper for all user pairing responses (PIN code, user confirm,
 * passkey and their negative variants). LE responses are forwarded to
 * SMP and answered immediately; BR/EDR responses are sent to the
 * controller via the given HCI opcode with a pending command tracking
 * the reply.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

3601 3602 3603 3604 3605 3606 3607
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY by delegating to the common
 * user-pairing response helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

3613 3614
/* Handle MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size parameter
 * struct, then delegate to the common user-pairing response helper.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

3629
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common helper. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

3641 3642
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common helper, forwarding
 * the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

3653
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common helper. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

3665
static void update_name(struct hci_request *req)
3666
{
3667
	struct hci_dev *hdev = req->hdev;
3668 3669
	struct hci_cp_write_local_name cp;

3670
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3671

3672
	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3673 3674
}

3675
/* HCI request completion handler for Set Local Name: answer the pending
 * mgmt command with either the HCI error status or the name that was set.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3703
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3704
			  u16 len)
3705
{
3706
	struct mgmt_cp_set_local_name *cp = data;
3707
	struct mgmt_pending_cmd *cmd;
3708
	struct hci_request req;
3709 3710 3711 3712
	int err;

	BT_DBG("");

3713
	hci_dev_lock(hdev);
3714

3715 3716 3717 3718 3719 3720
	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
3721 3722
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3723 3724 3725
		goto failed;
	}

3726
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3727

3728
	if (!hdev_is_powered(hdev)) {
3729
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3730

3731 3732
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3733 3734 3735
		if (err < 0)
			goto failed;

3736 3737
		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);
3738

3739 3740 3741
		goto failed;
	}

3742
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3743 3744 3745 3746 3747
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3748 3749
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

3750
	hci_req_init(&req, hdev);
3751 3752 3753 3754 3755 3756

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

3757 3758 3759
	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
3760
	if (lmp_le_capable(hdev))
3761
		update_scan_rsp_data(&req);
3762

3763
	err = hci_req_run(&req, set_name_complete);
3764 3765 3766 3767
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
3768
	hci_dev_unlock(hdev);
3769 3770 3771
	return err;
}

3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830
/* Completion handler for the Read Local OOB Data HCI request. Parses
 * either the legacy (P-192 only) or the extended (P-192 + P-256) reply
 * depending on which HCI opcode ran, and completes the pending mgmt
 * command with the hash/randomizer values. On any HCI error or short
 * reply the command is failed instead.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	/* Nothing to do if userspace is no longer waiting for a reply */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only P-192 hash/randomizer present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the mgmt reply so the absent P-256 fields are not sent */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values present */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}

3831
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request the controller's OOB
 * pairing data (extended variant when BR/EDR Secure Connections is
 * enabled); the reply is delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding OOB read at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3882
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * for a remote device. Accepts either the legacy command (P-192 only,
 * BR/EDR addresses only) or the extended command (P-192 + P-256, where
 * zero-valued keys disable the corresponding OOB data set).
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy variant carries P-192 data only, valid just for
		 * BR/EDR addresses.
		 */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3989
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY is a wildcard that clears every entry */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

4026
/* Queue a BR/EDR general inquiry into the request. Returns false and
 * sets *status when BR/EDR is unsupported or an inquiry is already
 * running.
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}

/* Queue an active LE scan with the given interval into the request,
 * first stopping any advertising or background scanning that would
 * conflict. Returns false and sets *status on rejection or failure.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
4112

4113 4114 4115
/* Queue the HCI commands needed for the configured discovery type
 * (BR/EDR inquiry, LE scan, or both for interleaved/simultaneous
 * discovery). Returns false and sets *status when discovery cannot be
 * started.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

4159 4160
/* HCI request completion handler for (service) discovery start: answer
 * the pending command, update the discovery state and, when an LE scan
 * is involved, schedule the delayed work that will stop it.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}

4236
/* Handle MGMT_OP_START_DISCOVERY: validate device state, set up the
 * discovery parameters and run the HCI request that starts inquiry
 * and/or LE scanning; completion is handled by
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4301

4302 4303
/* Completion helper for Start Service Discovery: reply with only the
 * first parameter byte (the discovery type) from the stored command.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4308

4309 4310 4311 4312
/* Handle the MGMT Start Service Discovery command: validate state and
 * parameters, store the RSSI/UUID result filter in hdev->discovery and
 * kick off discovery via an HCI request.
 *
 * Returns 0 or a negative errno; the mgmt status is delivered to user
 * space through mgmt_cmd_complete()/cmd_complete callbacks.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Each UUID entry is 16 bytes; cap the count so that
	 * sizeof(*cp) + uuid_count * 16 cannot overflow u16.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

4418
/* HCI request callback for Stop Discovery: respond to the pending mgmt
 * command and, on success, mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	/* Only transition the discovery state when the controller
	 * actually accepted the stop request.
	 */
	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

4438
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4439
			  u16 len)
4440
{
4441
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4442
	struct mgmt_pending_cmd *cmd;
4443
	struct hci_request req;
4444 4445
	int err;

4446
	BT_DBG("%s", hdev->name);
4447

4448
	hci_dev_lock(hdev);
4449

4450
	if (!hci_discovery_active(hdev)) {
4451 4452 4453
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
4454 4455 4456 4457
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
4458 4459 4460
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4461
		goto unlock;
4462 4463
	}

4464
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4465 4466
	if (!cmd) {
		err = -ENOMEM;
4467 4468 4469
		goto unlock;
	}

4470 4471
	cmd->cmd_complete = generic_cmd_complete;

4472 4473
	hci_req_init(&req, hdev);

4474
	hci_stop_discovery(&req);
4475

4476 4477 4478
	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4479
		goto unlock;
4480 4481
	}

4482 4483 4484 4485
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
4486 4487
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4488 4489
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}
4490

4491
unlock:
4492
	hci_dev_unlock(hdev);
4493 4494 4495
	return err;
}

4496
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4497
			u16 len)
4498
{
4499
	struct mgmt_cp_confirm_name *cp = data;
4500 4501 4502
	struct inquiry_entry *e;
	int err;

4503
	BT_DBG("%s", hdev->name);
4504 4505 4506

	hci_dev_lock(hdev);

4507
	if (!hci_discovery_active(hdev)) {
4508 4509 4510
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
4511 4512 4513
		goto failed;
	}

4514
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4515
	if (!e) {
4516 4517 4518
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
4519 4520 4521 4522 4523 4524 4525 4526
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
4527
		hci_inquiry_cache_update_resolve(hdev, e);
4528 4529
	}

4530 4531
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
4532 4533 4534 4535 4536 4537

failed:
	hci_dev_unlock(hdev);
	return err;
}

4538
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4539
			u16 len)
4540
{
4541
	struct mgmt_cp_block_device *cp = data;
4542
	u8 status;
4543 4544
	int err;

4545
	BT_DBG("%s", hdev->name);
4546

4547
	if (!bdaddr_type_is_valid(cp->addr.type))
4548 4549 4550
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4551

4552
	hci_dev_lock(hdev);
4553

4554 4555
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4556
	if (err < 0) {
4557
		status = MGMT_STATUS_FAILED;
4558 4559 4560 4561 4562 4563
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4564

4565
done:
4566 4567
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4568

4569
	hci_dev_unlock(hdev);
4570 4571 4572 4573

	return err;
}

4574
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4575
			  u16 len)
4576
{
4577
	struct mgmt_cp_unblock_device *cp = data;
4578
	u8 status;
4579 4580
	int err;

4581
	BT_DBG("%s", hdev->name);
4582

4583
	if (!bdaddr_type_is_valid(cp->addr.type))
4584 4585 4586
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4587

4588
	hci_dev_lock(hdev);
4589

4590 4591
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4592
	if (err < 0) {
4593
		status = MGMT_STATUS_INVALID_PARAMS;
4594 4595 4596 4597 4598 4599
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4600

4601
done:
4602 4603
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4604

4605
	hci_dev_unlock(hdev);
4606 4607 4608 4609

	return err;
}

4610 4611 4612 4613
/* Handle the MGMT Set Device ID command: store the DI profile values
 * (source, vendor, product, version) and refresh the EIR data so the
 * new Device ID record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR update; failure here is not reported */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

4645 4646 4647 4648 4649 4650
/* HCI request callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the HCI status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}

4651 4652
/* HCI request callback for Set Advertising: on failure send the error
 * status to all pending commands; on success sync the HCI_ADVERTISING
 * flag with the controller state, answer the pending commands, emit a
 * New Settings event, and re-enable instance advertising if needed.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}

4699 4700
/* Handle the MGMT Set Advertising command. Value 0x00 disables
 * advertising, 0x01 enables it, and 0x02 enables connectable
 * advertising. When no HCI traffic is needed (powered off, no state
 * change, active LE connections or active LE scan) only the flags are
 * toggled and a settings response is sent directly.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_adv_data_for_instance(&req, 0);
		update_scan_rsp_data_for_instance(&req, 0);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4795 4796 4797 4798 4799 4800 4801 4802
/* Handle the MGMT Set Static Address command: validate the address
 * format and store it. Only allowed on LE-capable controllers and only
 * while powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else must be a
	 * well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4839 4840 4841 4842 4843 4844 4845 4846 4847 4848
/* Handle the MGMT Set Scan Parameters command: validate the LE scan
 * interval/window (0x0004-0x4000, window <= interval), store them, and
 * restart the background scan if one is running so the new values take
 * effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}

4896 4897
/* HCI request callback for Set Fast Connectable: on success sync the
 * HCI_FAST_CONNECTABLE flag with the requested value and send settings
 * responses; on failure report the mapped mgmt status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

4930
/* Handle the MGMT Set Fast Connectable command: requires BR/EDR and at
 * least Bluetooth 1.2. When powered off only the flag is toggled;
 * otherwise the page scan parameters are rewritten via an HCI request.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just echo the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

4995
/* HCI request callback for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag (it was flipped optimistically before the
 * request was sent) and report the error; on success send the settings
 * responses.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the MGMT Set BR/EDR command on a dual-mode controller.
 * Disabling while powered on is rejected; re-enabling is rejected when
 * a static address or secure connections would make the resulting
 * configuration invalid. When powered the change is applied via an HCI
 * request (page scan and advertising data update).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR invalidates the BR/EDR-only
			 * settings as well.
			 */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5139 5140
/* HCI request callback for Set Secure Connections: sync the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags with the requested mode
 * (0x00 off, 0x01 enabled, 0x02 SC-only) and answer the pending
 * command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

5184 5185 5186 5187
/* Handle the MGMT Set Secure Connections command. Value 0x00 disables
 * SC, 0x01 enables it, 0x02 enables SC-only mode. When the controller
 * side is not involved (powered off, not SC capable, or BR/EDR
 * disabled) only the host flags are toggled; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is issued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR secure connections require SSP to be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

5272 5273 5274 5275
/* Handle the MGMT Set Debug Keys command. Value 0x00 disables keeping
 * debug keys, 0x01 keeps them, 0x02 additionally makes the controller
 * use SSP debug mode. If the use-flag changed while powered with SSP
 * enabled, HCI_OP_WRITE_SSP_DEBUG_MODE is sent to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5319 5320 5321 5322 5323 5324 5325 5326 5327 5328
/* Handle the MGMT Set Privacy command: toggle the HCI_PRIVACY flag and
 * store or clear the local IRK. Only allowed on LE-capable controllers
 * while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

/* Handle the Load IRKs management command: validate all supplied
 * Identity Resolving Keys, then atomically replace the stored set.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space loading IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5452 5453 5454 5455
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5469 5470
}

5471
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5472
			       void *cp_data, u16 len)
5473 5474
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
5475 5476
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
5477
	u16 key_count, expected_len;
5478
	int i, err;
5479

5480 5481 5482
	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
5483 5484
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
5485

5486
	key_count = __le16_to_cpu(cp->key_count);
5487 5488
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
5489 5490
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5491
	}
5492 5493 5494 5495 5496

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5497
		       expected_len, len);
5498 5499
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5500 5501
	}

5502
	BT_DBG("%s key_count %u", hdev->name, key_count);
5503

5504 5505 5506
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

5507
		if (!ltk_is_valid(key))
5508 5509 5510
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
5511 5512
	}

5513 5514 5515 5516 5517 5518
	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
5519
		u8 type, addr_type, authenticated;
5520 5521 5522 5523 5524

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;
5525

5526 5527
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
5528
			authenticated = 0x00;
5529
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5530 5531
			break;
		case MGMT_LTK_AUTHENTICATED:
5532
			authenticated = 0x01;
5533 5534 5535 5536 5537
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
5538
			break;
5539 5540 5541
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
5542
			break;
5543 5544 5545
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
5546 5547 5548
		default:
			continue;
		}
5549

5550
		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5551
			    authenticated, key->val, key->enc_size, key->ediv,
5552
			    key->rand);
5553 5554
	}

5555
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5556 5557
			   NULL, 0);

5558 5559
	hci_dev_unlock(hdev);

5560
	return err;
5561 5562
}

5563
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5564 5565
{
	struct hci_conn *conn = cmd->user_data;
5566
	struct mgmt_rp_get_conn_info rp;
5567
	int err;
5568

5569
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5570

5571
	if (status == MGMT_STATUS_SUCCESS) {
5572
		rp.rssi = conn->rssi;
5573 5574 5575 5576 5577 5578
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
5579 5580
	}

5581 5582
	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));
5583 5584

	hci_conn_drop(conn);
5585
	hci_conn_put(conn);
5586 5587

	return err;
5588 5589
}

5590 5591
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
5592 5593
{
	struct hci_cp_read_rssi *cp;
5594
	struct mgmt_pending_cmd *cmd;
5595 5596
	struct hci_conn *conn;
	u16 handle;
5597
	u8 status;
5598

5599
	BT_DBG("status 0x%02x", hci_status);
5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5615 5616 5617
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
5618 5619 5620
	}

	if (!cp) {
5621
		BT_ERR("invalid sent_cmd in conn_info response");
5622 5623 5624 5625 5626 5627
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
5628
		BT_ERR("unknown handle (%d) in conn_info response", handle);
5629 5630 5631
		goto unlock;
	}

5632
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5633 5634
	if (!cmd)
		goto unlock;
5635

5636 5637
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Get Connection Information management command. Replies
 * immediately from the cached hci_conn values when they are fresh
 * enough, otherwise issues Read RSSI / Read TX Power HCI commands and
 * defers the reply to conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh may be in flight per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5764
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5765
{
5766
	struct hci_conn *conn = cmd->user_data;
5767
	struct mgmt_rp_get_clock_info rp;
5768
	struct hci_dev *hdev;
5769
	int err;
5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
5789 5790
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));
5791 5792 5793 5794 5795

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}
5796 5797

	return err;
5798 5799
}

5800
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5801
{
5802
	struct hci_cp_read_clock *hci_cp;
5803
	struct mgmt_pending_cmd *cmd;
5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

5821
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5822 5823 5824
	if (!cmd)
		goto unlock;

5825
	cmd->cmd_complete(cmd, mgmt_status(status));
5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Get Clock Information management command: issue Read
 * Clock for the local clock and, when an address is given, for the
 * piconet clock of that BR/EDR connection. The reply is sent from
 * clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* -EIO if no conn params entry could be obtained for this address */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the auto-connect policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry may currently be on before
	 * re-filing it according to the new policy below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there is no
		 * established LE connection to this device yet.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5979
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5980
{
5981
	struct mgmt_pending_cmd *cmd;
5982 5983 5984 5985 5986

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

5987
	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5988 5989 5990 5991 5992 5993 5994 5995 5996 5997
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

5998 5999 6000 6001
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
6002
	struct mgmt_pending_cmd *cmd;
6003
	struct hci_request req;
6004 6005 6006 6007 6008
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

6009
	if (!bdaddr_type_is_valid(cp->addr.type) ||
6010
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6011 6012 6013
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
6014

6015
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6016 6017 6018
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
6019

6020 6021
	hci_req_init(&req, hdev);

6022 6023
	hci_dev_lock(hdev);

6024 6025 6026 6027 6028 6029 6030 6031
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

6032
	if (cp->addr.type == BDADDR_BREDR) {
6033
		/* Only incoming connections action is supported for now */
6034
		if (cp->action != 0x01) {
6035 6036
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
6037
			mgmt_pending_remove(cmd);
6038 6039 6040 6041 6042 6043 6044
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;
6045

6046
		__hci_update_page_scan(&req);
6047

6048 6049 6050
		goto added;
	}

6051 6052 6053 6054 6055
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

6056
	if (cp->action == 0x02)
6057
		auto_conn = HCI_AUTO_CONN_ALWAYS;
6058 6059
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
6060
	else
6061
		auto_conn = HCI_AUTO_CONN_REPORT;
6062

6063 6064 6065
	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
6066
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6067
				auto_conn) < 0) {
6068
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6069
		mgmt_pending_remove(cmd);
6070 6071 6072
		goto unlock;
	}

6073
added:
6074 6075
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

6076 6077 6078 6079 6080
	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
6081 6082
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6083 6084
		mgmt_pending_remove(cmd);
	}
6085 6086 6087 6088 6089 6090

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

6102
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6103
{
6104
	struct mgmt_pending_cmd *cmd;
6105 6106 6107 6108 6109

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

6110
	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6111 6112 6113 6114 6115 6116 6117 6118 6119 6120
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

6121 6122 6123 6124
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
6125
	struct mgmt_pending_cmd *cmd;
6126
	struct hci_request req;
6127 6128 6129 6130
	int err;

	BT_DBG("%s", hdev->name);

6131 6132
	hci_req_init(&req, hdev);

6133 6134
	hci_dev_lock(hdev);

6135 6136 6137 6138 6139 6140 6141 6142
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

6143
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6144
		struct hci_conn_params *params;
6145 6146
		u8 addr_type;

6147
		if (!bdaddr_type_is_valid(cp->addr.type)) {
6148 6149
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
6150
			mgmt_pending_remove(cmd);
6151 6152 6153
			goto unlock;
		}

6154 6155 6156 6157 6158
		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
6159 6160
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
6161
				mgmt_pending_remove(cmd);
6162 6163 6164
				goto unlock;
			}

6165
			__hci_update_page_scan(&req);
6166

6167 6168 6169 6170 6171
			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

6172 6173 6174 6175 6176
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

6177 6178 6179
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
6180 6181
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
6182
			mgmt_pending_remove(cmd);
6183 6184 6185 6186
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6187 6188
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
6189
			mgmt_pending_remove(cmd);
6190 6191 6192
			goto unlock;
		}

6193
		list_del(&params->action);
6194 6195
		list_del(&params->list);
		kfree(params);
6196
		__hci_update_background_scan(&req);
6197 6198

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6199
	} else {
6200
		struct hci_conn_params *p, *tmp;
6201
		struct bdaddr_list *b, *btmp;
6202

6203
		if (cp->addr.type) {
6204 6205
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
6206
			mgmt_pending_remove(cmd);
6207 6208 6209
			goto unlock;
		}

6210 6211 6212 6213 6214 6215
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

6216
		__hci_update_page_scan(&req);
6217

6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

6229
		__hci_update_background_scan(&req);
6230 6231
	}

6232
complete:
6233 6234 6235 6236 6237
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
6238 6239
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6240 6241
		mgmt_pending_remove(cmd);
	}
6242 6243 6244 6245 6246 6247

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6248 6249 6250 6251
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
6252 6253
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
6254 6255 6256 6257
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
6258 6259
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);
6260 6261

	param_count = __le16_to_cpu(cp->param_count);
6262 6263 6264
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
6265 6266
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
6267
	}
6268 6269 6270 6271 6272 6273

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
6274 6275
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

6330 6331
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
6332 6333
}

6334 6335 6336 6337 6338 6339 6340 6341 6342 6343
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
6344 6345
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);
6346 6347

	if (cp->config != 0x00 && cp->config != 0x01)
6348 6349
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);
6350 6351

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6352 6353
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);
6354 6355 6356 6357

	hci_dev_lock(hdev);

	if (cp->config)
6358
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6359
	else
6360
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6361 6362 6363 6364 6365 6366 6367 6368

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

6369 6370
	err = new_options(hdev, sk);

6371
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6372
		mgmt_index_removed(hdev);
6373

6374
		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6375 6376
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6377 6378 6379

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
6380
			set_bit(HCI_RAW, &hdev->flags);
6381 6382
			mgmt_index_added(hdev);
		}
6383 6384 6385 6386 6387 6388 6389
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6390 6391 6392 6393 6394 6395 6396 6397 6398 6399
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
6400 6401
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
6402 6403

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6404 6405
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);
6406 6407

	if (!hdev->set_bdaddr)
6408 6409
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

6423
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6424 6425 6426 6427 6428
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

6429
		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6430

6431 6432
		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6433 6434 6435 6436 6437 6438 6439 6440 6441

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591
/* Completion handler for the HCI local OOB data request issued by
 * read_local_ssp_oob_req(). Converts the controller response into a
 * Read Local OOB Extended Data management reply and, on success, also
 * broadcasts a Local OOB Data Updated event to other sockets that have
 * enabled OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	/* If the pending command is gone (e.g. the socket was closed),
	 * there is nobody to answer.
	 */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with an empty EIR blob */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy (non secure connections) variant: only the
		 * P-192 hash and randomizer are available.
		 */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes dev_class field + two 18 byte fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant: P-256 values are always present,
		 * P-192 only when not in Secure Connections Only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure eir_len is 0, so an empty reply is sent below */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

6592 6593 6594 6595 6596 6597 6598
/* Handler for the Read Local OOB Extended Data management command.
 *
 * For BR/EDR with SSP enabled the data has to be read from the
 * controller asynchronously (via read_local_ssp_oob_req()); for LE the
 * reply is assembled synchronously from local state. On success the
 * reply is also broadcast as a Local OOB Data Updated event.
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-compute the worst-case EIR length so the reply buffer can
	 * be allocated before taking the device lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB data must come from the controller; the
			 * reply is sent from the completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 = random
		 * (static), 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 = peripheral preferred, 0x01 = central
		 * preferred - NOTE(review): values taken as-is from the
		 * original code; confirm against the supplement spec.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762
/* Return the MGMT advertising flags this kernel/controller combination
 * can honor for Add Advertising.
 */
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	/* These are supported unconditionally */
	u32 flags = MGMT_ADV_FLAG_CONNECTABLE |
		    MGMT_ADV_FLAG_DISCOV |
		    MGMT_ADV_FLAG_LIMITED_DISCOV |
		    MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* TX power management requires the controller to report a valid
	 * advertising TX power.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
		flags |= MGMT_ADV_FLAG_TX_POWER;

	return flags;
}

6763 6764 6765 6766 6767 6768
/* Handler for the Read Advertising Features management command.
 *
 * Replies with the supported advertising flags, data size limits, and
 * the list of currently configured advertising instances (at most one
 * in this implementation).
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	bool instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp);

	/* Currently only one instance is supported, so just add 1 to the
	 * response length.
	 */
	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	if (instance)
		rp_len++;

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;

	/* Currently only one instance is supported, so simply return the
	 * current instance number.
	 */
	if (instance) {
		rp->num_instances = 1;
		rp->instance[0] = 1;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

6822
/* Validate user-supplied advertising or scan response TLV data.
 *
 * @adv_flags:   requested MGMT advertising flags
 * @data:        sequence of (length, type, value) EIR fields
 * @len:         total length of @data
 * @is_adv_data: true for advertising data, false for scan response data
 *
 * When the kernel itself manages the Flags and/or TX power fields
 * (selected via @adv_flags), space for them is reserved out of the
 * budget and userspace must not supply those field types itself.
 *
 * Fixes applied: removed stray line-number artifact lines interleaved
 * with the code, and moved the field-length bounds check before the
 * type-byte inspection so data[i + 1] is never read out of bounds on a
 * truncated field (a zero-length field carries no type byte at all).
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;
	int i, cur_len;
	bool flags_managed = false;
	bool tx_power_managed = false;
	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
			   MGMT_ADV_FLAG_MANAGED_FLAGS;

	if (is_adv_data && (adv_flags & flags_params)) {
		flags_managed = true;
		max_len -= 3;	/* reserve room for the Flags field */
	}

	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
		tx_power_managed = true;
		max_len -= 3;	/* reserve room for the TX power field */
	}

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* If the current field length would exceed the total data
		 * length, then it's invalid. With cur_len >= 1 this also
		 * guarantees data[i + 1] below is within bounds.
		 */
		if (i + cur_len >= len)
			return false;

		/* A zero-length field has no type byte to inspect */
		if (!cur_len)
			continue;

		if (flags_managed && data[i + 1] == EIR_FLAGS)
			return false;

		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
			return false;
	}

	return true;
}

/* Request-completion callback for the HCI transaction started by
 * add_advertising(). On failure the single advertising instance (id 1)
 * is torn down again before the pending management command is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_add_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	if (status) {
		/* Enabling advertising failed: drop the instance state
		 * and let listeners know instance 1 is gone.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
	}

	/* The pending command may already be gone (e.g. the socket was
	 * closed); in that case only the cleanup above is needed.
	 */
	if (!cmd)
		goto unlock;

	rp.instance = 0x01;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

6901
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
6902
{
6903
	hdev->adv_instance_timeout = 0;
6904 6905 6906 6907 6908 6909

	hci_dev_lock(hdev);
	clear_adv_instance(hdev);
	hci_dev_unlock(hdev);
}

6910 6911 6912 6913 6914 6915
/* Handler for the Add Advertising management command.
 *
 * Validates the request (only instance 1 and the supported flag subset
 * are accepted), stores the instance data, arms the optional expiry
 * timer, and - when the device is powered and Set Advertising is not in
 * control - programs the controller, answering from
 * add_advertising_complete().
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);

	/* The current implementation only supports adding one instance and only
	 * a subset of the specified flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (cp->instance != 0x01 || (flags & ~supported_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock source, i.e. a powered device */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->adv_instance.flags = flags;
	hdev->adv_instance.adv_data_len = cp->adv_data_len;
	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;

	if (cp->adv_data_len)
		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);

	if (cp->scan_rsp_len)
		memcpy(hdev->adv_instance.scan_rsp_data,
		       cp->data + cp->adv_data_len, cp->scan_rsp_len);

	/* Re-adding the instance replaces any previously armed timeout */
	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);

	hdev->adv_instance_timeout = timeout;

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));

	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
		advertising_added(sk, hdev, 1);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 0x01;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	update_scan_rsp_data(&req);
	enable_advertising(&req);

	err = hci_req_run(&req, add_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087
/* Request-completion callback for the HCI transaction started by
 * remove_advertising(). The instance state was already cleared before
 * the request ran, so the management command is answered with success
 * regardless of the HCI status.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	rp.instance = 1;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for the Remove Advertising management command.
 *
 * Tears down the single supported advertising instance and, when the
 * controller is actively advertising on its behalf, disables
 * advertising via an HCI request answered from
 * remove_advertising_complete().
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* The current implementation only allows modifying instance no 1. A
	 * value of 0 indicates that all instances should be cleared.
	 */
	if (cp->instance > 1)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Stop a pending instance expiry before dropping the instance */
	if (hdev->adv_instance_timeout)
		cancel_delayed_work(&hdev->adv_instance_expire);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));

	advertising_removed(sk, hdev, 1);

	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 1;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);
	disable_advertising(&req);

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

7129
static const struct hci_mgmt_handler mgmt_handlers[] = {
7130
	{ NULL }, /* 0x0000 (no command) */
7131
	{ read_version,            MGMT_READ_VERSION_SIZE,
7132 7133
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
7134
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
7135 7136
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
7137
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
7138 7139 7140 7141
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
7155 7156 7157 7158
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7171 7172 7173
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
7188 7189
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
7190 7191 7192 7193
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
7194 7195 7196
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7197 7198
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
7199
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
7200 7201
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
7202 7203 7204 7205 7206 7207
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
7208
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7209
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
7210 7211
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
7212
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
7213 7214
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
7215
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
7216 7217
};

7218
void mgmt_index_added(struct hci_dev *hdev)
7219
{
7220
	struct mgmt_ev_ext_index ev;
7221

7222 7223 7224
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

7225 7226 7227 7228 7229
	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7230
			ev.type = 0x01;
7231 7232 7233
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
7234
			ev.type = 0x00;
7235 7236
		}
		break;
7237 7238 7239 7240 7241
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
7242
	}
7243 7244 7245 7246 7247

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
7248 7249
}

7250
void mgmt_index_removed(struct hci_dev *hdev)
7251
{
7252
	struct mgmt_ev_ext_index ev;
7253
	u8 status = MGMT_STATUS_INVALID_INDEX;
7254

7255 7256 7257
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

7258 7259 7260
	switch (hdev->dev_type) {
	case HCI_BREDR:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7261

7262 7263 7264
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7265
			ev.type = 0x01;
7266 7267 7268
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
7269
			ev.type = 0x00;
7270 7271
		}
		break;
7272 7273 7274 7275 7276
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
7277
	}
7278 7279 7280 7281 7282

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
7283 7284
}

7285
/* This function requires the caller holds hdev->lock */
7286
static void restart_le_actions(struct hci_request *req)
7287
{
7288
	struct hci_dev *hdev = req->hdev;
7289 7290 7291
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
7292 7293 7294 7295 7296 7297
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
7298
		case HCI_AUTO_CONN_DIRECT:
7299 7300 7301 7302 7303 7304 7305 7306
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
7307
		}
7308
	}
7309

7310
	__hci_update_background_scan(req);
7311 7312
}

7313
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(). Registers SMP, answers all pending Set Powered
 * commands, and emits New Settings.
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);
}

7340
static int powered_update_hci(struct hci_dev *hdev)
7341
{
7342
	struct hci_request req;
7343
	u8 link_sec;
7344

7345 7346
	hci_req_init(&req, hdev);

7347
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7348
	    !lmp_host_ssp_capable(hdev)) {
7349
		u8 mode = 0x01;
7350

7351 7352 7353 7354
		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;
7355

7356 7357 7358
			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
7359 7360
	}

7361
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7362
	    lmp_bredr_capable(hdev)) {
7363
		struct hci_cp_write_le_host_supported cp;
7364

7365 7366
		cp.le = 0x01;
		cp.simul = 0x00;
7367

7368 7369 7370 7371 7372
		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
7373 7374
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
7375
	}
7376

7377
	if (lmp_le_capable(hdev)) {
7378 7379 7380 7381
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
7382
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7383
			update_adv_data(&req);
7384 7385
			update_scan_rsp_data(&req);
		}
7386

7387 7388
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7389
			enable_advertising(&req);
7390 7391

		restart_le_actions(&req);
7392 7393
	}

7394
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7395
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7396 7397
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);
7398

7399
	if (lmp_bredr_capable(hdev)) {
7400
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7401 7402 7403
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
7404
		__hci_update_page_scan(&req);
7405
		update_class(&req);
7406
		update_name(&req);
7407
		update_eir(&req);
7408
	}
7409

7410
	return hci_req_run(&req, powered_complete);
7411
}
7412

7413 7414 7415
/* Notify the management layer of a power state change.
 *
 * On power-on, powered_update_hci() normally takes over and answers the
 * pending commands from its completion handler; the fallback path below
 * only runs when no HCI request could be queued. On power-off, pending
 * commands are failed and a zeroed class of device is broadcast.
 *
 * Fix applied: removed stray line-number artifact lines that had been
 * interleaved with the code and broke compilation; logic is unchanged.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7459

7460
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7461
{
7462
	struct mgmt_pending_cmd *cmd;
7463 7464
	u8 status;

7465
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7466
	if (!cmd)
7467
		return;
7468 7469 7470 7471 7472 7473

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

7474
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7475 7476 7477 7478

	mgmt_pending_remove(cmd);
}

7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
7490 7491
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7492 7493

	hci_req_init(&req, hdev);
7494
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7495 7496 7497 7498
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
7499
	update_class(&req);
7500 7501 7502 7503 7504 7505 7506

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

7507 7508 7509 7510
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

7511 7512
	new_settings(hdev, NULL);

7513 7514 7515
	hci_dev_unlock(hdev);
}

7516 7517
/* Emit a New Link Key event for a BR/EDR link key. @persistent becomes
 * the store hint telling userspace whether to persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7532

7533 7534
/* Map an SMP long term key type plus its authentication state to the
 * corresponding mgmt interface LTK type value.
 */
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	/* Unknown key types are reported as unauthenticated */
	return MGMT_LTK_UNAUTHENTICATED;
}

7552
/* Emit a New Long Term Key event for @key. The store hint is suppressed
 * for non-identity random addresses since such keys cannot be re-used.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 *
	 * Fix: the copy length was previously sizeof(key->enc_size),
	 * i.e. 1 byte, which truncated the key material instead of
	 * copying enc_size bytes as the comment above intends.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

7595 7596 7597 7598 7599 7600
/* Emit a New IRK event. Storage is only suggested for IRKs that came
 * with a resolvable private address (rpa), see comment below.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of they system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

7625 7626
/* Emit a New CSRK (signature resolving key) event; storage is only
 * suggested when the remote provided an identity address.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

7655
/* Emit a New Connection Parameter event for an LE identity address.
 * Events for non-identity (resolvable/non-resolvable random) addresses
 * are suppressed since the parameters could not be re-applied later.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Parameters go out on the wire in little endian */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

7676 7677
/* Emit a Device Connected event, attaching either the LE advertising
 * data or (for BR/EDR) name and class-of-device EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the CoD field when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

7713
/* mgmt_pending_foreach() callback: complete a pending Disconnect command
 * and pass its socket (with a held reference) back through @data.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

7725
/* mgmt_pending_foreach() callback: signal Device Unpaired and complete a
 * pending Unpair Device command. @data is the hci_dev being operated on.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

7736 7737
bool mgmt_powering_down(struct hci_dev *hdev)
{
7738
	struct mgmt_pending_cmd *cmd;
7739 7740
	struct mgmt_mode *cp;

7741
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7742 7743 7744 7745 7746 7747 7748 7749 7750 7751
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

7752
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands for this controller. Also kicks off the
 * deferred power-off once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes any pending Disconnect command and captures its socket
	 * so that it is skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

7788 7789
/* Handle a failed disconnect attempt: resolve pending Unpair Device
 * commands and complete the pending Disconnect command (if it matches
 * the given address) with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7813

7814 7815
/* Emit a Connect Failed event; also triggers the deferred power-off if
 * this was the last connection while powering down.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7833

7834
/* Ask userspace for a PIN code for the given BR/EDR device; @secure
 * indicates that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

7845 7846
/* Complete a pending PIN Code Reply command with the translated status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

7858 7859
/* Complete a pending PIN Code Negative Reply command with the translated
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7870

7871
/* Ask userspace to confirm a pairing value (numeric comparison).
 * @confirm_hint tells userspace whether a simple yes/no is enough.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7888
/* Ask userspace to enter a passkey for pairing with the given device. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7902
/* Common completion helper for the user confirm/passkey (negative) reply
 * commands: find the pending command for @opcode and finish it with the
 * translated status. Returns -ENOENT if no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

7918
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

7925
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7932

7933
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

7940
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963
/* Emit a Passkey Notify event so userspace can display @passkey;
 * @entered reports how many digits the remote side has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

7964
/* Emit an Authentication Failed event for @conn and, if there is a
 * pending pairing command for it, complete that command (skipping its
 * socket when broadcasting the event).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7984

7985
/* HCI Write Auth Enable completed: sync HCI_LINK_SECURITY with the
 * controller state, answer pending Set Link Security commands and
 * broadcast New Settings when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

8012
/* Queue an HCI command clearing the controller's EIR data (and the
 * cached copy in hdev->eir). No-op when EIR is not supported.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

8027
/* HCI Write SSP Mode completed: sync the HCI_SSP_ENABLED (and dependent
 * HCI_HS_ENABLED) flags, answer pending Set SSP commands, broadcast New
 * Settings on change, and update or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flag set optimistically by
		 * the Set SSP command handler and announce the rollback.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed support */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

8080
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8081 8082 8083 8084 8085 8086 8087 8088 8089
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

8090 8091
/* Class of Device write completed: answer any pending commands that can
 * change the class (Set Dev Class, Add/Remove UUID) and, on success,
 * broadcast the Class Of Device Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}

8107
/* Local name write completed: broadcast Local Name Changed, except for
 * name updates that happen as part of powering the controller on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come from mgmt; cache it locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8133

8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145
/* Return true if the 128-bit @uuid appears in the @uuids list. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	u16 idx;

	for (idx = 0; idx < uuid_count; idx++)
		if (memcmp(uuid, uuids[idx], 16) == 0)
			return true;

	return false;
}

8146 8147
/* Walk the EIR/advertising data in @eir and return true if any UUID16,
 * UUID32 or UUID128 field contains a UUID from the @uuids filter list.
 * 16- and 32-bit UUIDs are expanded against the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the significant data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

8201 8202 8203
/* Schedule a delayed LE scan restart so that controllers with strict
 * duplicate filtering still report updated RSSI values. No-op if not
 * scanning or if the current scan window is about to end anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would land past the scan's end time */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

8216 8217
/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to a found device. Returns false when the result should be
 * dropped. May schedule an LE scan restart for controllers with strict
 * duplicate filtering.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

/* Report a discovered device to userspace as a Device Found event,
 * bundling EIR/advertising data, an optional class-of-device field and
 * the scan response. Results are suppressed for kernel-internal scans
 * and for results filtered out by service discovery.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8329

8330 8331
/* Report a resolved remote device name as a Device Found event whose
 * EIR contains a single complete-name field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8352

8353
/* Broadcast the Discovering event when discovery starts or stops. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8365

8366
/* hci_req_run() completion callback for re-enabling advertising; only
 * logs the result.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

/* Re-enable advertising (e.g. after a disconnect) if it was enabled via
 * Set Advertising or an advertising instance is configured.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
8383 8384 8385 8386 8387

/* Registration descriptor for the mgmt control channel. */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

/* Register the mgmt control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}