/*
   BlueZ - Bluetooth protocol stack for Linux
3

4
   Copyright (C) 2010  Nokia Corporation
5
   Copyright (C) 2011-2012 Intel Corporation
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	9

/* Management opcodes supported by this implementation. This table is
 * reported verbatim to userspace in the Read Management Supported
 * Commands reply (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
};

/* Management events this implementation can emit. Reported to userspace
 * after the command list in the Read Management Supported Commands reply
 * (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

143
/* State for a management command that has been accepted but not yet
 * completed. Instances are linked into hdev->mgmt_pending (see
 * mgmt_pending_find()).
 */
struct mgmt_pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* the pending MGMT_OP_* opcode */
	int index;		/* controller index the command targets */
	void *param;		/* copy of the command parameters */
	size_t param_len;	/* size of @param in bytes */
	struct sock *sk;	/* socket that issued the command */
	void *user_data;	/* opaque per-command context (matched by
				 * mgmt_pending_find_data()) */
	/* called to send the final response once the operation finishes */
	int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
};

154 155 156 157 158 159 160 161
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. Out-of-range codes map to MGMT_STATUS_FAILED via
 * mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

/* Translate an HCI status code into the corresponding mgmt status,
 * falling back to MGMT_STATUS_FAILED for codes outside the table.
 */
static u8 mgmt_status(u8 hci_status)
{
	return hci_status < ARRAY_SIZE(mgmt_status_table) ?
				mgmt_status_table[hci_status] :
				MGMT_STATUS_FAILED;
}

227 228
/* Build a mgmt event packet and broadcast it on the given HCI socket
 * channel. @hdev selects the controller index in the header (or
 * MGMT_INDEX_NONE when NULL), @flag filters receiving sockets and
 * @skip_sk, if non-NULL, is excluded from delivery.
 *
 * Returns 0 on success or -ENOMEM if the skb could not be allocated.
 */
static int mgmt_send_event(u16 event, struct hci_dev *hdev,
			   unsigned short channel, void *data, u16 data_len,
			   int flag, struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	/* Payload is optional; header-only events pass data == NULL */
	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_channel() clones the skb per receiver, so drop our
	 * reference afterwards.
	 */
	hci_send_to_channel(channel, skb, flag, skip_sk);
	kfree_skb(skb);

	return 0;
}

258 259
/* Send an index-related event on the control channel, filtered by the
 * caller-supplied socket flag, with no socket excluded.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

265 266 267 268 269 270 271
/* Send an event on the control channel to sockets that opted in to
 * generic events (HCI_MGMT_GENERIC_EVENTS), skipping @skip_sk.
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

272 273 274 275
/* Send an event on the control channel restricted to trusted sockets
 * (HCI_SOCK_TRUSTED), skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

279
/* Queue a Command Status event for @cmd with the given @status to the
 * socket that issued the command. Used for immediate failures where no
 * response parameters exist.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM, or the error
 * from sock_queue_rcv_skb()).
 */
static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On success the socket owns the skb; free it only on failure */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

309 310
/* Queue a Command Complete event for @cmd to the issuing socket,
 * appending @rp_len bytes of response parameters from @rp (which may be
 * NULL when rp_len is 0).
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM, or the error
 * from sock_queue_rcv_skb()).
 */
static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			     void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On success the socket owns the skb; free it only on failure */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

343 344
/* Handler for MGMT_OP_READ_VERSION: report the mgmt interface version
 * and revision. hdev, data and data_len are unused for this command.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp = {
		.version  = MGMT_VERSION,
		.revision = cpu_to_le16(MGMT_REVISION),
	};

	BT_DBG("sock %p", sk);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

357 358
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
359 360
{
	struct mgmt_rp_read_commands *rp;
361 362
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
363
	__le16 *opcode;
364 365 366 367 368 369 370 371 372 373 374
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

375 376
	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);
377 378 379 380 381 382 383

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

384 385
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
386 387 388 389 390
	kfree(rp);

	return err;
}

391 392
/* Handler for MGMT_OP_READ_INDEX_LIST: report the indexes of all
 * configured BR/EDR controllers.
 *
 * Two passes over hci_dev_list are made under the same read lock: the
 * first sizes the reply (without the SETUP/CONFIG/USER_CHANNEL/raw
 * filters, so it may over-allocate slightly), the second fills it and
 * yields the final count, after which rp_len is recomputed.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocation happens with hci_dev_list_lock held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink rp_len to what the second pass actually filled in */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the indexes of all
 * BR/EDR controllers still flagged HCI_UNCONFIGURED. Mirrors
 * read_index_list() with the configured test inverted; see that
 * function for the two-pass sizing/fill pattern.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocation happens with hci_dev_list_lock held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all controllers with
 * a per-entry type (0x00 configured BR/EDR, 0x01 unconfigured BR/EDR,
 * 0x02 AMP) and bus. Calling this switches the socket over to extended
 * index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocation happens with hci_dev_list_lock held */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

587 588 589
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
590
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
591 592 593 594 595 596 597 598 599
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

600 601 602 603
/* Build the little-endian bitmask of configuration options that are
 * still outstanding for @hdev; the same conditions that make
 * is_configured() return false.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration requested by quirk but not done yet */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Invalid default address and no public address set */
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

615 616 617 618
/* Broadcast a New Configuration Options event carrying the current set
 * of missing options, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}

623 624 625 626
/* Reply to a configuration command with the current missing-options
 * bitmask as the command-complete parameters.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

631 632 633 634
/* Handler for MGMT_OP_READ_CONFIG_INFO: report manufacturer plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A settable public address requires a driver set_bdaddr hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

659 660 661 662 663
/* Compute the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its LMP features and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is only offered when something can actually be
	 * configured: external config quirk or a set_bdaddr driver hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}

/* Compute the MGMT_SETTING_* bitmask describing the controller's
 * current state, mapping each hdev flag to its mgmt setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never bet set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, limited to @len bytes. Same lazy-header / ALL-to-SOME downgrade
 * scheme as create_uuid16_list().
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, limited to @len bytes. Same lazy-header / ALL-to-SOME downgrade
 * scheme as create_uuid16_list().
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

877 878
/* Find the first pending mgmt command on @hdev with the given opcode,
 * or NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
						  struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

890 891 892
/* Find the pending mgmt command on @hdev matching both the opcode and
 * the user_data pointer, or NULL if none matches. Used when several
 * commands with the same opcode may be pending for different targets.
 */
static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
						       struct hci_dev *hdev,
						       const void *data)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->user_data != data)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

906 907
/* Fill @ptr with scan response data: the local device name, shortened
 * (EIR_NAME_SHORT) if it does not fit in the advertising data budget.
 *
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field's length and type octets */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length octet covers the type octet plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

/* Queue an LE Set Scan Response Data command on @req if LE is enabled
 * and the freshly generated data differs from what is already cached in
 * hdev->scan_rsp_data.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

957 958
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) for @hdev, honouring an in-flight Set
 * Discoverable command over the current flags.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

981
/* Fill @ptr with LE advertising data: a Flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and a TX Power field
 * when a valid power level is known.
 *
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the Flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

1013
/* Queue an LE Set Advertising Data command on @req if LE is enabled and
 * the freshly generated data differs from the cached hdev->adv_data.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
/* Public entry point: regenerate the advertising data and run the
 * resulting HCI request immediately. Returns the hci_req_run() result.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}

1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070
/* Fill @data with Extended Inquiry Response content: local name
 * (shortened past 48 bytes), inquiry TX power, Device ID record, and
 * the 16/32/128-bit service UUID lists, each bounded by
 * HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID field: 1 length + 1 type + 4 x 16-bit values */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper is given the space remaining in the buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

1096
static void update_eir(struct hci_request *req)
1097
{
1098
	struct hci_dev *hdev = req->hdev;
1099 1100
	struct hci_cp_write_eir cp;

1101
	if (!hdev_is_powered(hdev))
1102
		return;
1103

1104
	if (!lmp_ext_inq_capable(hdev))
1105
		return;
1106

1107
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1108
		return;
1109

1110
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1111
		return;
1112 1113 1114 1115 1116 1117

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1118
		return;
1119 1120 1121

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

1122
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

1136
static void update_class(struct hci_request *req)
1137
{
1138
	struct hci_dev *hdev = req->hdev;
1139 1140 1141 1142
	u8 cod[3];

	BT_DBG("%s", hdev->name);

1143
	if (!hdev_is_powered(hdev))
1144
		return;
1145

1146
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1147 1148
		return;

1149
	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1150
		return;
1151 1152 1153 1154 1155

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

1156
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1157 1158
		cod[1] |= 0x20;

1159
	if (memcmp(cod, hdev->dev_class, 3) == 0)
1160
		return;
1161

1162
	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1163 1164
}

1165
static bool get_connectable(struct hci_dev *hdev)
1166
{
1167
	struct mgmt_pending_cmd *cmd;
1168 1169 1170 1171 1172 1173 1174

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
1175
		return cp->val;
1176 1177
	}

1178
	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1179 1180
}

1181 1182 1183 1184 1185 1186 1187
/* Queue the HCI command that turns LE advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1188 1189 1190 1191
/* Queue the HCI commands that (re)start LE advertising, choosing the
 * advertising type and own address type from the current settings.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Never start advertising while an LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Advertising parameters can only change while disabled, so
	 * stop any currently running instance first.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		connectable = true;
	else
		connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

1232 1233 1234
/* Delayed work: when the service cache period expires, push the
 * now-final EIR data and Class of Device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test-and-clear guarantees the update runs at most once */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the request outside the device lock */
	hci_req_run(&req, NULL);
}

1253 1254 1255 1256 1257 1258 1259 1260
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is active, restart it so a fresh RPA gets written.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

1274
/* One-time per-controller mgmt initialization, triggered the first
 * time a mgmt socket addresses this device.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set makes this body run only once per device */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}

1290
/* Handle MGMT_OP_READ_INFO: report the controller's address, version,
 * manufacturer, settings, class and names back to user-space.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Snapshot all fields under the device lock for consistency */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

1320
/* Drop the held socket reference and free a pending command. */
static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

1327 1328 1329
/* Allocate a pending command entry, copy the request parameters, take
 * a reference on the requesting socket and link the entry into the
 * device's pending list. Returns NULL on allocation failure.
 */
static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
						 struct hci_dev *hdev,
						 void *data, u16 len)
{
	struct mgmt_pending_cmd *pending;

	pending = kzalloc(sizeof(*pending), GFP_KERNEL);
	if (!pending)
		return NULL;

	pending->param = kmemdup(data, len, GFP_KERNEL);
	if (!pending->param) {
		kfree(pending);
		return NULL;
	}

	pending->opcode = opcode;
	pending->index = hdev->id;
	pending->param_len = len;

	/* Hold the socket for the lifetime of the pending command */
	sock_hold(sk);
	pending->sk = sk;

	list_add(&pending->list, &hdev->mgmt_pending);

	return pending;
}

1356
/* Invoke @cb on every pending command matching @opcode; an opcode of
 * 0 matches all. Safe iteration, since the callback may remove the
 * entry from the list.
 */
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct mgmt_pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct mgmt_pending_cmd *pending, *next;

	list_for_each_entry_safe(pending, next, &hdev->mgmt_pending, list) {
		if (opcode == 0 || pending->opcode == opcode)
			cb(pending, data);
	}
}

1371
/* Unlink a pending command from its device list and free it. */
static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

1377
/* Complete a settings-changing command by replying with the current
 * settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

1385
/* Completion callback for the power-off cleanup request: once no
 * connections remain, run the power-off work right away instead of
 * waiting for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

1395
/* Queue the HCI commands needed to stop whatever discovery activity
 * is currently in progress. Returns true when a stop command was
 * actually queued (i.e. discovery is stopping asynchronously).
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop the auto-disable timer too */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request pending, nothing to cancel */
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}

1437 1438 1439 1440
/* Build and run one HCI request that tears down all activity before
 * powering off: disable scanning and advertising, stop discovery, and
 * disconnect/cancel/reject every connection. Returns the
 * hci_req_run() result (-ENODATA when nothing needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scanning if either is enabled */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Wind down every connection according to its current state */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}

1496
/* Handle MGMT_OP_SET_POWERED: power the controller on or off. Power
 * off is asynchronous (connections are torn down first); the reply is
 * sent from the pending command when the transition completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device was only kept on by the auto-off grace period: claim
	 * it for mgmt and, when powering on, just report powered.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

1562 1563
/* Broadcast a New Settings event to all mgmt sockets except @skip
 * (typically the socket whose command caused the change).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
}

1570 1571 1572 1573 1574
/* Public wrapper: broadcast New Settings to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

1575 1576 1577 1578 1579 1580
/* Shared context passed to mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first matching socket (reference held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};

1581
/* Foreach callback: answer a pending settings command and remember
 * the first responding socket so the caller can skip it when
 * broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Manual unlink + free (not mgmt_pending_remove) so the socket
	 * reference can be transferred to the lookup first.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

1597
/* Foreach callback: fail a pending command with the given status and
 * remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

1605
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

1619
/* Default completion: echo the stored command parameters back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

1625
/* Completion for address-based commands: reply with only the
 * mgmt_addr_info that the stored parameters start with.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

1631 1632 1633 1634
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1635
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1636 1637 1638 1639 1640 1641 1642 1643 1644
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
1645
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1646 1647 1648 1649 1650
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

1651 1652
/* HCI-request completion for SET_DISCOVERABLE: commit or roll back
 * the discoverable flags, arm the discoverable timeout, reply to the
 * pending command and refresh class/page scan.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag set optimistically by
		 * set_discoverable() before the request ran.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the auto-off timer that was deferred until the
		 * HCI request succeeded.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1709
/* Handle MGMT_OP_SET_DISCOVERABLE: val is 0x00 (off), 0x01 (general)
 * or 0x02 (limited, which requires a timeout). Updates flags directly
 * when powered off; otherwise queues the HCI commands and finishes in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered device */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on a connectable device */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

1874 1875
/* Queue page scan tuning commands: fast mode uses interlaced scanning
 * with a 160 msec interval, normal mode the standard 1.28 sec default.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* These page scan commands require at least Bluetooth 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only queue commands for values that actually change */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

1909 1910
/* HCI-request completion for SET_CONNECTABLE: commit the flag
 * changes, reply to the pending command and refresh the dependent
 * scanning and advertising state.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Turning connectable off also disables discoverable */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

1959 1960 1961 1962 1963 1964
/* Flag-only connectable update used when no HCI commands are needed
 * (device powered off, or the request produced an empty HCI request).
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed;
	int err;

	/* Record whether the flag will actually flip before we touch it */
	changed = (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE));

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_update_page_scan(hdev);
	hci_update_background_scan(hdev);

	return new_settings(hdev, sk);
}

1988
/* Handle MGMT_OP_SET_CONNECTABLE: toggle page scan (BR/EDR) and/or
 * connectable advertising (LE). When powered off, or when no HCI
 * commands end up being needed, only the flags are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: a flag update is all that is needed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Dropping inquiry scan ends discoverable, so
			 * the auto-off timer is no longer needed.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing was queued, fall back to flags-only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2084
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2085
			u16 len)
2086
{
2087
	struct mgmt_mode *cp = data;
2088
	bool changed;
2089 2090
	int err;

2091
	BT_DBG("request for %s", hdev->name);
2092

2093
	if (cp->val != 0x00 && cp->val != 0x01)
2094 2095
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);
2096

2097
	hci_dev_lock(hdev);
2098 2099

	if (cp->val)
2100
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2101
	else
2102
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2103

2104
	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2105
	if (err < 0)
2106
		goto unlock;
2107

2108 2109
	if (changed)
		err = new_settings(hdev, sk);
2110

2111
unlock:
2112
	hci_dev_unlock(hdev);
2113 2114 2115
	return err;
}

2116 2117
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * authentication. Updates only the flag when powered off; otherwise
 * sends HCI Write Authentication Enable and completes asynchronously.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: remember the setting and report success */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth mode */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2185
/* Enable or disable Secure Simple Pairing.
 *
 * When the controller is not powered only the HCI_SSP_ENABLED flag is
 * updated (disabling SSP also clears High Speed, which depends on SSP);
 * otherwise the change is pushed via HCI_OP_WRITE_SSP_MODE.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* HS depends on SSP, so it must be cleared as
			 * well; report a change if either flag flipped.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set SSP command may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just reply. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug key mode. */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

2266
/* Enable or disable High Speed support. HS is a host-side only setting
 * (no HCI command is issued here) but requires SSP to be enabled first.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS builds on top of SSP; reject if SSP is disabled. */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP may toggle HCI_HS_ENABLED underneath us. */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while the controller is powered is
		 * rejected.
		 */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2323
/* HCI request callback for Set LE: report the outcome to every pending
 * Set LE command and, on success, notify changed settings and refresh
 * the advertising data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp may have stashed a socket reference in match. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}

2363
/* Enable or disable Low Energy support.
 *
 * When the controller is powered and the host LE state actually
 * changes, HCI_OP_WRITE_LE_HOST_SUPPORTED is issued (stopping any
 * ongoing advertising first when disabling); otherwise only the
 * HCI_LE_ENABLED flag is updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		/* Only flag bookkeeping is needed: the controller is
		 * either off or already in the requested state.
		 */
		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on with LE disabled. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands touching the LE state. */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning LE off. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2452 2453 2454 2455 2456 2457 2458 2459
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
2460
	struct mgmt_pending_cmd *cmd;
2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493
/* 128-bit Bluetooth base UUID in little-endian byte order. */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Classify a 128-bit UUID as a shortened 16- or 32-bit UUID when it is
 * built on top of the Bluetooth base UUID, otherwise as full 128-bit.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 shortened;

	/* Anything not derived from the base UUID stays full-size. */
	if (memcmp(uuid, bluetooth_base_uuid, 12) != 0)
		return 128;

	shortened = get_unaligned_le32(&uuid[12]);

	return (shortened > 0xffff) ? 32 : 16;
}

2494 2495
/* Shared completion helper for mgmt commands that modify the Class of
 * Device (Add/Remove UUID, Set Device Class): answer the pending
 * command with the current device class and release it.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (cmd) {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

2513
/* HCI request callback for Add UUID: complete the pending mgmt command
 * with the resulting status and the current device class.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

2520
/* Add a service UUID and refresh the Class of Device and EIR data to
 * reflect it. If no HCI traffic is needed (hci_req_run returns
 * -ENODATA) the command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means nothing needed to be sent; any other
		 * error is a genuine failure.
		 */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

2578 2579 2580 2581 2582
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

2583
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2584 2585
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
2586 2587 2588 2589 2590 2591
		return true;
	}

	return false;
}

2592
/* HCI request callback for Remove UUID: complete the pending mgmt
 * command with the resulting status and the current device class.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2599
/* Remove a service UUID (or, with the all-zero wildcard UUID, every
 * UUID) and refresh the Class of Device and EIR data accordingly.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID clears the whole list. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache worker was armed it will do the
		 * class/EIR update; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA means nothing needed to be sent; any other
		 * error is a genuine failure.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2678
/* HCI request callback for Set Device Class: complete the pending mgmt
 * command with the resulting status and the current device class.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2685
/* Set the major/minor Class of Device. The values are stored right
 * away; if the controller is powered the new class (and, when the
 * service cache was active, refreshed EIR data) is pushed via an HCI
 * request.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-changing command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the service cache
		 * worker to cancel — presumably the worker takes the
		 * hdev lock itself (NOTE(review): confirm against the
		 * service_cache work function).
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means nothing needed to be sent; any other
		 * error is a genuine failure.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2756
/* Replace the controller's stored BR/EDR link keys with the list
 * supplied by userspace. Debug combination keys are never loaded.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so the expected_len computation below cannot
	 * overflow the u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

2838
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2839
			   u8 addr_type, struct sock *skip_sk)
2840 2841 2842 2843 2844 2845 2846
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2847
			  skip_sk);
2848 2849
}

2850
/* Remove all pairing data (link key, LTK, IRK) for a device and, if
 * requested and the device is connected, terminate the link. When a
 * disconnect is issued the command stays pending until it completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was found for the device: report it as not paired. */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

2965
/* Disconnect an active BR/EDR or LE connection. The command stays
 * pending until the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3030
/* Translate an HCI link/address type pair into the corresponding mgmt
 * BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}

3049 3050
/* Report the list of currently connected devices. SCO/eSCO links are
 * filtered out since mgmt only tracks ACL and LE connections.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses, skipping SCO/eSCO links. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

3107
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3108
				   struct mgmt_cp_pin_code_neg_reply *cp)
3109
{
3110
	struct mgmt_pending_cmd *cmd;
3111 3112
	int err;

3113
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3114
			       sizeof(*cp));
3115 3116 3117
	if (!cmd)
		return -ENOMEM;

3118
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3119
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3120 3121 3122 3123 3124 3125
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

3126
/* Forward a userspace PIN code reply to the controller. A non-16-byte
 * PIN is rejected (and negatively replied to the remote) when the
 * pending security level requires a 16 digit PIN.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; otherwise reject
	 * the pairing with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

3188 3189
/* Store the IO capability to be used for future pairing attempts. */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

3212
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3213 3214
{
	struct hci_dev *hdev = conn->hdev;
3215
	struct mgmt_pending_cmd *cmd;
3216

3217
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

3230
/* Complete a pending Pair Device command: send the final response,
 * detach the pairing callbacks from the connection and drop the
 * references the pairing code held on it.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

3259 3260 3261
/* SMP pairing finished: resolve the matching pending Pair Device
 * command with success or failure depending on @complete.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}

3271 3272
/* Connection-level pairing callback: finish the matching Pair Device
 * command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}

3287
/* LE pairing callback: only failures are acted upon here (successful
 * completion is reported through mgmt_smp_complete()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}

3306
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3307
		       u16 len)
3308
{
3309
	struct mgmt_cp_pair_device *cp = data;
3310
	struct mgmt_rp_pair_device rp;
3311
	struct mgmt_pending_cmd *cmd;
3312 3313 3314 3315 3316 3317
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

3318 3319 3320 3321
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

3322
	if (!bdaddr_type_is_valid(cp->addr.type))
3323 3324 3325
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3326

3327
	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3328 3329 3330
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));
3331

3332
	hci_dev_lock(hdev);
3333

3334
	if (!hdev_is_powered(hdev)) {
3335 3336 3337
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
3338 3339 3340
		goto unlock;
	}

3341 3342 3343 3344 3345 3346 3347
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

3348
	sec_level = BT_SECURITY_MEDIUM;
3349
	auth_type = HCI_AT_DEDICATED_BONDING;
3350

3351
	if (cp->addr.type == BDADDR_BREDR) {
3352 3353
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
3354 3355 3356 3357 3358 3359 3360 3361 3362 3363
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374
		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

3375
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3376 3377
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
3378
	}
3379

3380
	if (IS_ERR(conn)) {
3381 3382 3383 3384
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
3385 3386 3387 3388
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
3389 3390 3391
		else
			status = MGMT_STATUS_CONNECT_FAILED;

3392 3393
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
3394 3395 3396 3397
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
3398
		hci_conn_drop(conn);
3399 3400
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3401 3402 3403
		goto unlock;
	}

3404
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3405 3406
	if (!cmd) {
		err = -ENOMEM;
3407
		hci_conn_drop(conn);
3408 3409 3410
		goto unlock;
	}

3411 3412
	cmd->cmd_complete = pairing_complete;

3413
	/* For LE, just connecting isn't a proof that the pairing finished */
3414
	if (cp->addr.type == BDADDR_BREDR) {
3415
		conn->connect_cfm_cb = pairing_complete_cb;
3416 3417 3418 3419 3420 3421 3422
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}
3423

3424
	conn->io_capability = cp->io_cap;
3425
	cmd->user_data = hci_conn_get(conn);
3426

3427
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3428 3429 3430 3431
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}
3432 3433 3434 3435

	err = 0;

unlock:
3436
	hci_dev_unlock(hdev);
3437 3438 3439
	return err;
}

3440 3441
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
3442
{
3443
	struct mgmt_addr_info *addr = data;
3444
	struct mgmt_pending_cmd *cmd;
3445 3446 3447 3448 3449 3450 3451
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

3452
	if (!hdev_is_powered(hdev)) {
3453 3454
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
3455 3456 3457
		goto unlock;
	}

3458 3459
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
3460 3461
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
3462 3463 3464 3465 3466 3467
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3468 3469
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
3470 3471 3472
		goto unlock;
	}

3473 3474
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);
3475

3476 3477
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
3478 3479 3480 3481 3482
unlock:
	hci_dev_unlock(hdev);
	return err;
}

3483
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3484
			     struct mgmt_addr_info *addr, u16 mgmt_op,
3485
			     u16 hci_op, __le32 passkey)
3486
{
3487
	struct mgmt_pending_cmd *cmd;
3488
	struct hci_conn *conn;
3489 3490
	int err;

3491
	hci_dev_lock(hdev);
3492

3493
	if (!hdev_is_powered(hdev)) {
3494 3495 3496
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
3497
		goto done;
3498 3499
	}

3500 3501
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3502
	else
3503
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3504 3505

	if (!conn) {
3506 3507 3508
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
3509 3510
		goto done;
	}
3511

3512
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3513 3514
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
3515 3516 3517
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
3518
		else
3519 3520 3521
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));
3522 3523 3524 3525

		goto done;
	}

3526
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3527 3528
	if (!cmd) {
		err = -ENOMEM;
3529
		goto done;
3530 3531
	}

3532 3533
	cmd->cmd_complete = addr_cmd_complete;

3534
	/* Continue with pairing via HCI */
3535 3536 3537
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

3538
		bacpy(&cp.bdaddr, &addr->bdaddr);
3539 3540 3541
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
3542 3543
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);
3544

3545 3546
	if (err < 0)
		mgmt_pending_remove(cmd);
3547

3548
done:
3549
	hci_dev_unlock(hdev);
3550 3551 3552
	return err;
}

3553 3554 3555 3556 3557 3558 3559
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

3560
	return user_pairing_resp(sk, hdev, &cp->addr,
3561 3562 3563 3564
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

3565 3566
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
3567
{
3568
	struct mgmt_cp_user_confirm_reply *cp = data;
3569 3570 3571 3572

	BT_DBG("");

	if (len != sizeof(*cp))
3573 3574
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);
3575

3576
	return user_pairing_resp(sk, hdev, &cp->addr,
3577 3578
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
3579 3580
}

3581
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3582
				  void *data, u16 len)
3583
{
3584
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3585 3586 3587

	BT_DBG("");

3588
	return user_pairing_resp(sk, hdev, &cp->addr,
3589 3590
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3591 3592
}

3593 3594
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
3595
{
3596
	struct mgmt_cp_user_passkey_reply *cp = data;
3597 3598 3599

	BT_DBG("");

3600
	return user_pairing_resp(sk, hdev, &cp->addr,
3601 3602
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3603 3604
}

3605
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3606
				  void *data, u16 len)
3607
{
3608
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3609 3610 3611

	BT_DBG("");

3612
	return user_pairing_resp(sk, hdev, &cp->addr,
3613 3614
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3615 3616
}

3617
static void update_name(struct hci_request *req)
3618
{
3619
	struct hci_dev *hdev = req->hdev;
3620 3621
	struct hci_cp_write_local_name cp;

3622
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3623

3624
	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3625 3626
}

3627
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3628 3629
{
	struct mgmt_cp_set_local_name *cp;
3630
	struct mgmt_pending_cmd *cmd;
3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
3643 3644
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
3645
	else
3646 3647
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));
3648 3649 3650 3651 3652 3653 3654

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3655
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3656
			  u16 len)
3657
{
3658
	struct mgmt_cp_set_local_name *cp = data;
3659
	struct mgmt_pending_cmd *cmd;
3660
	struct hci_request req;
3661 3662 3663 3664
	int err;

	BT_DBG("");

3665
	hci_dev_lock(hdev);
3666

3667 3668 3669 3670 3671 3672
	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
3673 3674
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3675 3676 3677
		goto failed;
	}

3678
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3679

3680
	if (!hdev_is_powered(hdev)) {
3681
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3682

3683 3684
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
3685 3686 3687
		if (err < 0)
			goto failed;

3688 3689
		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);
3690

3691 3692 3693
		goto failed;
	}

3694
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3695 3696 3697 3698 3699
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3700 3701
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

3702
	hci_req_init(&req, hdev);
3703 3704 3705 3706 3707 3708

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

3709 3710 3711
	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
3712
	if (lmp_le_capable(hdev))
3713
		update_scan_rsp_data(&req);
3714

3715
	err = hci_req_run(&req, set_name_complete);
3716 3717 3718 3719
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
3720
	hci_dev_unlock(hdev);
3721 3722 3723
	return err;
}

3724
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3725
			       void *data, u16 data_len)
3726
{
3727
	struct mgmt_pending_cmd *cmd;
3728 3729
	int err;

3730
	BT_DBG("%s", hdev->name);
3731

3732
	hci_dev_lock(hdev);
3733

3734
	if (!hdev_is_powered(hdev)) {
3735 3736
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
3737 3738 3739
		goto unlock;
	}

3740
	if (!lmp_ssp_capable(hdev)) {
3741 3742
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
3743 3744 3745
		goto unlock;
	}

3746
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3747 3748
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
3749 3750 3751
		goto unlock;
	}

3752
	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3753 3754 3755 3756 3757
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

3758
	if (bredr_sc_enabled(hdev))
3759 3760 3761 3762 3763
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

3764 3765 3766 3767
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
3768
	hci_dev_unlock(hdev);
3769 3770 3771
	return err;
}

3772
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3773
			       void *data, u16 len)
3774
{
3775
	struct mgmt_addr_info *addr = data;
3776 3777
	int err;

3778
	BT_DBG("%s ", hdev->name);
3779

3780
	if (!bdaddr_type_is_valid(addr->type))
3781 3782 3783 3784
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));
3785

3786
	hci_dev_lock(hdev);
3787

3788 3789 3790
	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;
3791

3792
		if (cp->addr.type != BDADDR_BREDR) {
3793 3794 3795 3796
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
3797 3798 3799
			goto unlock;
		}

3800
		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3801 3802
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
3803 3804 3805 3806 3807
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

3808 3809 3810
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
3811 3812
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3813
		u8 *rand192, *hash192, *rand256, *hash256;
3814 3815
		u8 status;

3816
		if (bdaddr_type_is_le(cp->addr.type)) {
3817 3818 3819 3820 3821
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
3822 3823 3824 3825
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
3826 3827 3828
				goto unlock;
			}

3829 3830 3831
			rand192 = NULL;
			hash192 = NULL;
		} else {
3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
3855 3856
		}

3857
		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3858
					      cp->addr.type, hash192, rand192,
3859
					      hash256, rand256);
3860 3861 3862 3863 3864
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

3865 3866 3867
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
3868 3869
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3870 3871
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
3872
	}
3873

3874
unlock:
3875
	hci_dev_unlock(hdev);
3876 3877 3878
	return err;
}

3879
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3880
				  void *data, u16 len)
3881
{
3882
	struct mgmt_cp_remove_remote_oob_data *cp = data;
3883
	u8 status;
3884 3885
	int err;

3886
	BT_DBG("%s", hdev->name);
3887

3888
	if (cp->addr.type != BDADDR_BREDR)
3889 3890 3891 3892
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
3893

3894
	hci_dev_lock(hdev);
3895

3896 3897 3898 3899 3900 3901
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

3902
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3903
	if (err < 0)
3904
		status = MGMT_STATUS_INVALID_PARAMS;
3905
	else
3906
		status = MGMT_STATUS_SUCCESS;
3907

3908
done:
3909 3910
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));
3911

3912
	hci_dev_unlock(hdev);
3913 3914 3915
	return err;
}

3916
static bool trigger_discovery(struct hci_request *req, u8 *status)
3917
{
3918 3919 3920 3921 3922 3923 3924
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
3925 3926
	int err;

3927 3928 3929 3930 3931
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;
3932

3933 3934 3935 3936
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}
3937

3938
		hci_inquiry_cache_flush(hdev);
3939

3940 3941 3942 3943 3944
		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;
3945

3946 3947 3948 3949 3950 3951 3952
	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3953
		    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3954 3955 3956 3957
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

3958
		if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
3976
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3977 3978 3979 3980 3981 3982
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
3983
		 * or non-resolvable private address.
3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
4011 4012
}

4013 4014
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
4015
{
4016
	struct mgmt_pending_cmd *cmd;
4017
	unsigned long timeout;
4018

4019 4020
	BT_DBG("status %d", status);

4021
	hci_dev_lock(hdev);
4022

4023
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4024 4025 4026
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

4027
	if (cmd) {
4028
		cmd->cmd_complete(cmd, mgmt_status(status));
4029 4030
		mgmt_pending_remove(cmd);
	}
4031 4032

	if (status) {
4033 4034
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
4035 4036 4037 4038
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

4039 4040 4041
	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
4042 4043
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
4044
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4045 4046
		break;
	case DISCOV_TYPE_INTERLEAVED:
4047
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4048 4049
		break;
	case DISCOV_TYPE_BREDR:
4050
		timeout = 0;
4051 4052 4053
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4054 4055
		timeout = 0;
		break;
4056
	}
4057

4058 4059 4060 4061 4062 4063 4064 4065
	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
4066
		    hdev->discovery.result_filtering) {
4067 4068 4069 4070
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

4071 4072
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
4073
	}
4074

4075 4076
unlock:
	hci_dev_unlock(hdev);
4077 4078
}

4079
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4080
			   void *data, u16 len)
4081
{
4082
	struct mgmt_cp_start_discovery *cp = data;
4083
	struct mgmt_pending_cmd *cmd;
4084
	struct hci_request req;
4085
	u8 status;
4086 4087
	int err;

4088
	BT_DBG("%s", hdev->name);
4089

4090
	hci_dev_lock(hdev);
4091

4092
	if (!hdev_is_powered(hdev)) {
4093 4094 4095
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
4096 4097 4098
		goto failed;
	}

4099
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
4100
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4101 4102 4103
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
4104 4105 4106
		goto failed;
	}

4107
	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4108 4109 4110 4111 4112
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

4113 4114
	cmd->cmd_complete = generic_cmd_complete;

4115 4116 4117 4118 4119
	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

A
Andre Guedes 已提交
4120
	hdev->discovery.type = cp->type;
4121
	hdev->discovery.report_invalid_rssi = false;
A
Andre Guedes 已提交
4122

4123 4124
	hci_req_init(&req, hdev);

4125
	if (!trigger_discovery(&req, &status)) {
4126 4127
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
4128 4129
		mgmt_pending_remove(cmd);
		goto failed;
4130
	}
4131

4132
	err = hci_req_run(&req, start_discovery_complete);
4133
	if (err < 0) {
4134
		mgmt_pending_remove(cmd);
4135 4136
		goto failed;
	}
4137

4138
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4139

4140
failed:
4141
	hci_dev_unlock(hdev);
4142 4143
	return err;
}
4144

4145 4146
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
4147
{
4148 4149
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
4150
}
4151

4152 4153 4154 4155
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
4156
	struct mgmt_pending_cmd *cmd;
4157 4158 4159 4160 4161
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;
4162

4163
	BT_DBG("%s", hdev->name);
4164

4165
	hci_dev_lock(hdev);
4166

4167
	if (!hdev_is_powered(hdev)) {
4168 4169 4170 4171
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
4172 4173
		goto failed;
	}
4174

4175
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
4176
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4177 4178 4179 4180
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
4181 4182
		goto failed;
	}
4183

4184 4185 4186 4187
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
4188 4189 4190 4191
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
4192 4193 4194 4195 4196 4197 4198
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
4199 4200 4201 4202
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
4203 4204 4205 4206
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4207
			       hdev, data, len);
4208 4209 4210 4211 4212
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

4213 4214
	cmd->cmd_complete = service_discovery_cmd_complete;

4215 4216 4217 4218 4219
	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

4220
	hdev->discovery.result_filtering = true;
4221 4222 4223 4224 4225 4226 4227 4228
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
4229 4230 4231 4232
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
4233 4234 4235
			mgmt_pending_remove(cmd);
			goto failed;
		}
4236
	}
4237

4238
	hci_req_init(&req, hdev);
4239

4240
	if (!trigger_discovery(&req, &status)) {
4241 4242 4243
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
4244 4245
		mgmt_pending_remove(cmd);
		goto failed;
4246
	}
4247

4248
	err = hci_req_run(&req, start_discovery_complete);
4249
	if (err < 0) {
4250
		mgmt_pending_remove(cmd);
4251 4252 4253 4254
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4255 4256

failed:
4257
	hci_dev_unlock(hdev);
4258 4259 4260
	return err;
}

4261
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4262
{
4263
	struct mgmt_pending_cmd *cmd;
4264

4265 4266 4267 4268
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

4269 4270
	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
4271
		cmd->cmd_complete(cmd, mgmt_status(status));
4272
		mgmt_pending_remove(cmd);
4273 4274
	}

4275 4276
	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4277 4278 4279 4280

	hci_dev_unlock(hdev);
}

4281
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4282
			  u16 len)
4283
{
4284
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4285
	struct mgmt_pending_cmd *cmd;
4286
	struct hci_request req;
4287 4288
	int err;

4289
	BT_DBG("%s", hdev->name);
4290

4291
	hci_dev_lock(hdev);
4292

4293
	if (!hci_discovery_active(hdev)) {
4294 4295 4296
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
4297 4298 4299 4300
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
4301 4302 4303
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4304
		goto unlock;
4305 4306
	}

4307
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4308 4309
	if (!cmd) {
		err = -ENOMEM;
4310 4311 4312
		goto unlock;
	}

4313 4314
	cmd->cmd_complete = generic_cmd_complete;

4315 4316
	hci_req_init(&req, hdev);

4317
	hci_stop_discovery(&req);
4318

4319 4320 4321
	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4322
		goto unlock;
4323 4324
	}

4325 4326 4327 4328
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
4329 4330
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
4331 4332
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}
4333

4334
unlock:
4335
	hci_dev_unlock(hdev);
4336 4337 4338
	return err;
}

4339
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4340
			u16 len)
4341
{
4342
	struct mgmt_cp_confirm_name *cp = data;
4343 4344 4345
	struct inquiry_entry *e;
	int err;

4346
	BT_DBG("%s", hdev->name);
4347 4348 4349

	hci_dev_lock(hdev);

4350
	if (!hci_discovery_active(hdev)) {
4351 4352 4353
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
4354 4355 4356
		goto failed;
	}

4357
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4358
	if (!e) {
4359 4360 4361
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
4362 4363 4364 4365 4366 4367 4368 4369
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
4370
		hci_inquiry_cache_update_resolve(hdev, e);
4371 4372
	}

4373 4374
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
4375 4376 4377 4378 4379 4380

failed:
	hci_dev_unlock(hdev);
	return err;
}

4381
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4382
			u16 len)
4383
{
4384
	struct mgmt_cp_block_device *cp = data;
4385
	u8 status;
4386 4387
	int err;

4388
	BT_DBG("%s", hdev->name);
4389

4390
	if (!bdaddr_type_is_valid(cp->addr.type))
4391 4392 4393
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4394

4395
	hci_dev_lock(hdev);
4396

4397 4398
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4399
	if (err < 0) {
4400
		status = MGMT_STATUS_FAILED;
4401 4402 4403 4404 4405 4406
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4407

4408
done:
4409 4410
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4411

4412
	hci_dev_unlock(hdev);
4413 4414 4415 4416

	return err;
}

4417
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4418
			  u16 len)
4419
{
4420
	struct mgmt_cp_unblock_device *cp = data;
4421
	u8 status;
4422 4423
	int err;

4424
	BT_DBG("%s", hdev->name);
4425

4426
	if (!bdaddr_type_is_valid(cp->addr.type))
4427 4428 4429
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
4430

4431
	hci_dev_lock(hdev);
4432

4433 4434
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
4435
	if (err < 0) {
4436
		status = MGMT_STATUS_INVALID_PARAMS;
4437 4438 4439 4440 4441 4442
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;
4443

4444
done:
4445 4446
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
4447

4448
	hci_dev_unlock(hdev);
4449 4450 4451 4452

	return err;
}

4453 4454 4455 4456
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
4457
	struct hci_request req;
4458
	int err;
4459
	__u16 source;
4460 4461 4462

	BT_DBG("%s", hdev->name);

4463 4464 4465
	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
4466 4467
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);
4468

4469 4470
	hci_dev_lock(hdev);

4471
	hdev->devid_source = source;
4472 4473 4474 4475
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

4476 4477
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);
4478

4479 4480 4481
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);
4482 4483 4484 4485 4486 4487

	hci_dev_unlock(hdev);

	return err;
}

/* HCI request completion handler for Set Advertising: sync the
 * HCI_ADVERTISING mgmt flag with the controller's actual HCI_LE_ADV
 * state and answer all pending Set Advertising commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the real controller advertising state into the flag
	 * exposed to userspace.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responder's socket */
	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set Advertising mgmt command. Value 0x00 disables
 * advertising, 0x01 enables it, 0x02 enables it in connectable mode
 * (tracked via HCI_ADVERTISING_CONNECTABLE). Whenever the change can be
 * expressed purely as a flag update, no HCI traffic is generated.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if something actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize with other in-flight commands that touch LE state */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Set the connectable flag before building the request so the
	 * advertising parameters reflect it.
	 */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

4612 4613 4614 4615 4616 4617 4618 4619
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

4620
	if (!lmp_le_capable(hdev))
4621 4622
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
4623 4624

	if (hdev_is_powered(hdev))
4625 4626
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
4627 4628 4629

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4630 4631 4632
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
4633 4634 4635

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4636 4637 4638
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
4639 4640 4641 4642 4643 4644
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

4645 4646 4647 4648 4649
	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);
4650

4651
unlock:
4652 4653 4654 4655
	hci_dev_unlock(hdev);
	return err;
}

4656 4657 4658 4659 4660 4661 4662 4663 4664 4665
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
4666 4667
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);
4668 4669 4670 4671

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
4672 4673
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4674 4675 4676 4677

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
4678 4679
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4680

4681
	if (window > interval)
4682 4683
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
4684

4685 4686 4687 4688 4689
	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

4690 4691
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);
4692

4693 4694 4695
	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
4696
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

4708 4709 4710 4711 4712
	hci_dev_unlock(hdev);

	return err;
}

/* HCI request completion handler for Set Fast Connectable: update the
 * HCI_FAST_CONNECTABLE flag from the pending command's requested value
 * and answer the command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stashed in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set Fast Connectable mgmt command: toggle the page scan
 * parameters between normal and fast-connectable mode. Requires BR/EDR
 * and a controller of at least Bluetooth 1.2 (interlaced page scan).
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op if the flag already matches the requested value */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* HCI request completion handler for Set BR/EDR: on failure roll back
 * the optimistically-set HCI_BREDR_ENABLED flag, otherwise confirm the
 * new setting to the caller and broadcast New Settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set BR/EDR mgmt command on a dual-mode controller.
 * Disabling BR/EDR while powered is rejected; re-enabling it is
 * rejected when it would produce an invalid identity configuration
 * (static address in use, or secure connections enabled).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* This setting only makes sense on a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the flag already matches the requested value */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Turning BR/EDR off clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* HCI request completion handler for Set Secure Connections: translate
 * the requested value (0x00 off, 0x01 on, 0x02 SC-only mode) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flag pair and answer the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

/* Handle the Set Secure Connections mgmt command. Value 0x00 disables
 * SC, 0x01 enables it, 0x02 enables SC-only mode. When the controller
 * side is not involved (powered off, no SC support, or BR/EDR off) the
 * flags are toggled directly; otherwise an HCI Write SC Support command
 * is issued and completion is handled in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR the controller-side SC feature requires SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI traffic needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No-op if both the enable state and SC-only mode already match */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

5089 5090 5091 5092
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
5093
	bool changed, use_changed;
5094 5095 5096 5097
	int err;

	BT_DBG("request for %s", hdev->name);

5098
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5099 5100
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
5101 5102 5103 5104

	hci_dev_lock(hdev);

	if (cp->val)
5105
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5106
	else
5107 5108
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
5109

5110
	if (cp->val == 0x02)
5111 5112
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
5113
	else
5114 5115
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);
5116 5117

	if (hdev_is_powered(hdev) && use_changed &&
5118
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5119 5120 5121 5122 5123
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135
	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5136 5137 5138 5139 5140 5141 5142 5143 5144 5145
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
5146 5147
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);
5148 5149

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5150 5151
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);
5152 5153

	if (hdev_is_powered(hdev))
5154 5155
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);
5156 5157 5158

	hci_dev_lock(hdev);

5159 5160 5161
	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
5162
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5163

5164
	if (cp->privacy) {
5165
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5166
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5167
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5168
	} else {
5169
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5170
		memset(hdev->irk, 0, sizeof(hdev->irk));
5171
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

/* Handle the Load IRKs mgmt command: validate the variable-length key
 * list, replace the SMP IRK store with the supplied entries, and enable
 * RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so expected_len below cannot overflow u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* Userspace that loads IRKs is expected to handle RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

5269 5270 5271 5272
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;
5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
5286 5287
}

/* Handle the Load Long Term Keys mgmt command: validate the
 * variable-length key list and replace the SMP LTK store with the
 * supplied entries. Debug keys and unknown key types are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound so expected_len below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before touching the existing LTK store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through: debug keys are deliberately not
			 * added to the LTK store
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* cmd_complete callback for Get Connection Info: build the response
 * from the connection's cached RSSI/TX power values (or the "invalid"
 * sentinels on failure) and release the connection references taken
 * when the command was queued.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original request's address block was stashed as cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the references taken when the command was queued */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

/* HCI request completion handler for the Get Connection Info refresh
 * request (Read RSSI followed by Read TX Power): recover the connection
 * handle from the last-sent command and complete the pending mgmt
 * command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection, refreshing the cached values from the controller
 * when they are stale or were never read.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The requested address is echoed back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding Get Conn Info per connection is allowed */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive while the request is in flight;
		 * released by the command's cmd_complete callback.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5581
/* Complete a pending Get Clock Info command: build the reply from the
 * cached clock values and release the connection reference taken in
 * get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Echo the address back (first member of the stored command params) */
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold and the reference taken when the command was queued */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}

5617
/* HCI request callback for Get Clock Info: map the last sent Read Clock
 * command back to the pending mgmt command and complete it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0x00 means the piconet clock of a specific connection
	 * was requested; resolve the connection from the handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and,
 * optionally, the piconet clock of a connected BR/EDR device.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection whose piconet
	 * clock should also be read.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which == 0x00 (zeroed struct) reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783
/* Return true if there is an established LE link to the given address
 * with a matching destination address type.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	return conn && conn->dst_type == type && conn->state == BT_CONNECTED;
}

/* Create (if necessary) the connection parameters for the given address
 * and switch their auto-connect policy, moving the entry onto the
 * matching action list and refreshing the background scan as needed.
 *
 * This function requires the caller holds hdev->lock.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is already in effect */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry was on */
	list_del_init(&params->action);

	if (auto_connect == HCI_AUTO_CONN_DISABLED ||
	    auto_connect == HCI_AUTO_CONN_LINK_LOSS) {
		__hci_update_background_scan(req);
	} else if (auto_connect == HCI_AUTO_CONN_REPORT) {
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
	} else if (auto_connect == HCI_AUTO_CONN_DIRECT ||
		   auto_connect == HCI_AUTO_CONN_ALWAYS) {
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795
/* Emit a Device Added mgmt event for the given address and action,
 * skipping the socket that issued the originating command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.addr.type = type;
	ev.action = action;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

5796
/* HCI request callback for Add Device: complete the pending mgmt
 * command with the mapped HCI status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

5815 5816 5817 5818
/* Handler for MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for
 * incoming connections, or configure an LE device's auto-connect
 * policy (0x00 report, 0x01 direct, 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action value onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918
/* Emit a Device Removed mgmt event for the given address, skipping the
 * socket that issued the originating command.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

5919
/* HCI request callback for Remove Device: complete the pending mgmt
 * command with the mapped HCI status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

5938 5939 5940 5941
/* Handler for MGMT_OP_REMOVE_DEVICE: remove a single device from the
 * whitelist / connection parameter lists, or — when BDADDR_ANY is
 * given — clear all whitelist entries and non-disabled LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * removing them through this command is invalid.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: wipe everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6065 6066 6067 6068
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the supplied list.  Individual invalid entries are
 * skipped with a log message rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of disabled entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

6151 6152 6153 6154 6155 6156 6157 6158 6159 6160
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the "externally
 * configured" state of a controller that declares the EXTERNAL_CONFIG
 * quirk, re-registering the index if the configured state changes.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flag toggle may have moved the controller between the
	 * configured and unconfigured index lists; if so, re-register it.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6207 6208 6209 6210 6211 6212 6213 6214 6215 6216
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: stage a public address for a
 * controller that supports setting one, and kick off configuration if
 * the device thereby becomes fully configured.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only controllers with a set_bdaddr driver hook can apply it */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address was the last missing piece, move the controller
	 * to the configured list and power it up for configuration.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269
/* Append one EIR structure (length, type, payload) at offset eir_len
 * and return the new total length.  The caller must guarantee the
 * buffer has room for data_len + 2 more bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *p = &eir[eir_len];

	/* Length octet counts the type byte plus the payload */
	*p++ = sizeof(type) + data_len;
	*p++ = type;
	memcpy(p, data, data_len);

	return eir_len + sizeof(u8) + sizeof(type) + data_len;
}

6270 6271 6272 6273 6274 6275 6276
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return out-of-band
 * pairing data as EIR-formatted structures, for either BR/EDR or LE
 * (but not a mixed type mask).
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_NOT_POWERED,
					 &cp->type, sizeof(cp->type));

	/* First pass: validate the type and size the EIR payload */
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		status = mgmt_bredr_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		eir_len = 5;
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		status = mgmt_le_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		/* bdaddr + role + SC confirm + SC random + flags entries */
		eir_len = 9 + 3 + 18 + 18 + 3;
		break;
	default:
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->type, sizeof(cp->type));
	}

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	/* Second pass: build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			err = mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_FAILED,
					 &cp->type, sizeof(cp->type));
			goto done;
		}

		/* Pick the address the peer will see; the 7th byte marks
		 * the address type (0x01 random, 0x00 public).
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			memcpy(addr, &hdev->rpa, 6);
			addr[6] = 0x01;
		} else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
			   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
			   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
			    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_CONFIRM,
					  hash, sizeof(hash));

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_RANDOM,
					  rand, sizeof(rand));

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				MGMT_STATUS_SUCCESS, rp, rp_len);

done:
	kfree(rp);

	return err;
}

6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427
/* Handler for MGMT_OP_READ_ADV_FEATURES: report the supported
 * advertising capabilities (no flags or instances supported yet).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len = sizeof(*rp);
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	/* No advertising flags or instances are supported yet */
	rp->supported_flags = cpu_to_le32(0);
	rp->max_adv_data_len = 31;
	rp->max_scan_rsp_len = 31;
	rp->max_instances = 0;
	rp->num_instances = 0;

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

6428
/* Command dispatch table: the array index is the mgmt opcode, so the
 * order of entries must never change.  The second field is the exact
 * parameter size (or the minimum size with HCI_MGMT_VAR_LEN).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
};

6514 6515
/* Entry point for a raw management command received on an HCI control
 * socket. Copies the message from userspace, validates the header and
 * payload length, looks up the per-channel handler for the opcode,
 * enforces trust / index / configuration-state restrictions and finally
 * dispatches to the handler.
 *
 * Returns the number of consumed bytes on success or a negative errno.
 */
int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
		 struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must exactly cover the payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config, or claimed by a user
		 * channel, are not visible to the management interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must be sent without an
	 * index, and vice versa.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands declare a minimum size; fixed-length
	 * ones must match exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6624

6625
void mgmt_index_added(struct hci_dev *hdev)
6626
{
6627
	struct mgmt_ev_ext_index ev;
6628

6629 6630 6631
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6632 6633 6634 6635 6636
	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6637
			ev.type = 0x01;
6638 6639 6640
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6641
			ev.type = 0x00;
6642 6643
		}
		break;
6644 6645 6646 6647 6648
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6649
	}
6650 6651 6652 6653 6654

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6655 6656
}

6657
void mgmt_index_removed(struct hci_dev *hdev)
6658
{
6659
	struct mgmt_ev_ext_index ev;
6660
	u8 status = MGMT_STATUS_INVALID_INDEX;
6661

6662 6663 6664
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

6665 6666 6667
	switch (hdev->dev_type) {
	case HCI_BREDR:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6668

6669 6670 6671
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6672
			ev.type = 0x01;
6673 6674 6675
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
6676
			ev.type = 0x00;
6677 6678
		}
		break;
6679 6680 6681 6682 6683
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
6684
	}
6685 6686 6687 6688 6689

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
6690 6691
}

6692
/* This function requires the caller holds hdev->lock */
6693
static void restart_le_actions(struct hci_request *req)
6694
{
6695
	struct hci_dev *hdev = req->hdev;
6696 6697 6698
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
6699 6700 6701 6702 6703 6704
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
6705
		case HCI_AUTO_CONN_DIRECT:
6706 6707 6708 6709 6710 6711 6712 6713
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
6714
		}
6715
	}
6716

6717
	__hci_update_background_scan(req);
6718 6719
}

6720
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(). Registers SMP on success and answers all
 * pending Set Powered commands with the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

6747
static int powered_update_hci(struct hci_dev *hdev)
6748
{
6749
	struct hci_request req;
6750
	u8 link_sec;
6751

6752 6753
	hci_req_init(&req, hdev);

6754
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6755
	    !lmp_host_ssp_capable(hdev)) {
6756
		u8 mode = 0x01;
6757

6758 6759 6760 6761
		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;
6762

6763 6764 6765
			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
6766 6767
	}

6768
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6769
	    lmp_bredr_capable(hdev)) {
6770
		struct hci_cp_write_le_host_supported cp;
6771

6772 6773
		cp.le = 0x01;
		cp.simul = 0x00;
6774

6775 6776 6777 6778 6779
		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
6780 6781
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
6782
	}
6783

6784
	if (lmp_le_capable(hdev)) {
6785 6786 6787 6788
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
6789
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6790
			update_adv_data(&req);
6791 6792
			update_scan_rsp_data(&req);
		}
6793

6794
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6795
			enable_advertising(&req);
6796 6797

		restart_le_actions(&req);
6798 6799
	}

6800
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6801
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6802 6803
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);
6804

6805
	if (lmp_bredr_capable(hdev)) {
6806
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6807 6808 6809
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
6810
		__hci_update_page_scan(&req);
6811
		update_class(&req);
6812
		update_name(&req);
6813
		update_eir(&req);
6814
	}
6815

6816
	return hci_req_run(&req, powered_complete);
6817
}
6818

6819 6820 6821
/* Notify the management interface of a power state change. On power-on
 * the deferred powered_update_hci() request normally completes the
 * pending Set Powered commands; on power-off (or a failed power-on
 * request) all pending commands are failed and a settings event sent.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* 0 means the update request was queued and
		 * powered_complete() will respond to pending commands.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class of device reset if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6865

6866
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6867
{
6868
	struct mgmt_pending_cmd *cmd;
6869 6870 6871 6872
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
6873
		return;
6874 6875 6876 6877 6878 6879

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

6880
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6881 6882 6883 6884

	mgmt_pending_remove(cmd);
}

6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
6896 6897
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6898 6899

	hci_req_init(&req, hdev);
6900
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6901 6902 6903 6904
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
6905
	update_class(&req);
6906
	update_adv_data(&req);
6907 6908 6909 6910
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

6911 6912
	new_settings(hdev, NULL);

6913 6914 6915
	hci_dev_unlock(hdev);
}

6916 6917
/* Emit a New Link Key event for a BR/EDR link key; store_hint tells
 * userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6932

6933 6934
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}
6948 6949 6950 6951

	return MGMT_LTK_UNAUTHENTICATED;
}

6952
/* Emit a New Long Term Key event. The store hint is suppressed for
 * peers using non-identity random addresses, since such keys cannot
 * be reused after the address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

6990 6991 6992 6993 6994 6995
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011
	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resovlable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of they system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

7012 7013 7014 7015 7016 7017 7018 7019
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

7020 7021
/* Emit a New CSRK event. As with LTKs, storage is suppressed for
 * peers on non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

7050
/* Emit a New Connection Parameter event for an LE peer. Only peers
 * with identity addresses are reported, since parameters for changing
 * private addresses cannot be meaningfully persisted.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

7071 7072
/* Emit a Device Connected event, attaching either the peer's LE
 * advertising data or (for BR/EDR) its name and class of device as
 * EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

7108
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7109 7110 7111
{
	struct sock **sk = data;

7112
	cmd->cmd_complete(cmd, 0);
7113 7114 7115 7116

	*sk = cmd->sk;
	sock_hold(*sk);

7117
	mgmt_pending_remove(cmd);
7118 7119
}

7120
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7121
{
7122
	struct hci_dev *hdev = data;
7123
	struct mgmt_cp_unpair_device *cp = cmd->param;
7124

7125 7126
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

7127
	cmd->cmd_complete(cmd, 0);
7128 7129 7130
	mgmt_pending_remove(cmd);
}

7131 7132
bool mgmt_powering_down(struct hci_dev *hdev)
{
7133
	struct mgmt_pending_cmd *cmd;
7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146
	struct mgmt_mode *cp;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

7147
/* Emit a Device Disconnected event and complete related pending
 * Disconnect / Unpair Device commands. During power-down the last
 * connection going away also accelerates the queued power-off work.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; sk receives the
	 * originating socket so it is skipped for the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

7183 7184
/* Handle a failed HCI disconnect: flush pending Unpair Device
 * commands and complete the pending Disconnect command for this
 * address with the mapped status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond if the pending command targets this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7208

7209 7210
/* Emit a Connect Failed event; if the controller is powering down and
 * this was the last connection, accelerate the queued power-off work.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7228

7229
/* Emit a PIN Code Request event for a BR/EDR peer; secure indicates
 * that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

7240 7241
/* Complete a pending PIN Code Reply command with the mapped HCI
 * status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

7253 7254
/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7265

7266
/* Emit a User Confirm Request event asking userspace to confirm the
 * displayed numeric value; confirm_hint indicates auto-accept is
 * possible.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7283
/* Emit a User Passkey Request event asking userspace to provide a
 * passkey for pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

7297
/* Shared completion helper for user confirm/passkey (neg) replies:
 * find the pending command for the given opcode and complete it with
 * the mapped status. Returns -ENOENT when no command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

7313
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

7320
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7327

7328
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

7335
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358
/* Emit a Passkey Notify event so userspace can display the passkey;
 * entered reports how many digits the remote side has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

7359
/* Emit an Auth Failed event for a connection. If a Pair Device command
 * initiated the authentication, its socket is skipped for the event
 * and the command is completed with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7379

7380
/* Completion handler for Write Authentication Enable: synchronize the
 * HCI_LINK_SECURITY flag with the controller state, answer pending
 * Set Link Security commands and send a settings event when the
 * setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

7407
static void clear_eir(struct hci_request *req)
7408
{
7409
	struct hci_dev *hdev = req->hdev;
7410 7411
	struct hci_cp_write_eir cp;

7412
	if (!lmp_ext_inq_capable(hdev))
7413
		return;
7414

7415 7416
	memset(hdev->eir, 0, sizeof(hdev->eir));

7417 7418
	memset(&cp, 0, sizeof(cp));

7419
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7420 7421
}

7422
/* Completion handler for Write Simple Pairing Mode: synchronize the
 * SSP (and dependent High Speed) flags, answer pending Set SSP
 * commands, and update or clear the EIR data to match the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistic enable; HS depends on SSP */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

7475
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7476 7477 7478 7479 7480 7481 7482 7483 7484
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

7485 7486
/* Completion handler for class-of-device updates: locate the socket
 * of any pending class-affecting command and, on success, broadcast
 * the Class Of Device Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}

7502
/* Completion handler for a local name change: update the cached name
 * and broadcast a Local Name Changed event, except while a power-on
 * sequence is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change came from HCI, not mgmt: cache it */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
7528

7529
/* Completion handler for Read Local OOB Data: respond to the pending
 * command with the P-192 values and, when Secure Connections is
 * enabled and the controller supplied them, the P-256 values too.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			        mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Trim the reply to the P-192 values only */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				  &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7565

7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577
/* Return true if the 128-bit uuid appears in the uuids array of
 * uuid_count entries.
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

7578 7579
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
7597
				memcpy(uuid, bluetooth_base_uuid, 16);
7598 7599 7600 7601 7602 7603 7604 7605 7606
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
7607
				memcpy(uuid, bluetooth_base_uuid, 16);
7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

7630 7631 7632
	return false;
}

7633 7634 7635
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
7636
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

7648 7649
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7650
{
7651 7652 7653 7654 7655
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
7656 7657 7658
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
7659 7660
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7661 7662 7663
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7664
		return  false;
7665

7666 7667 7668
	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
7669
		 */
7670 7671 7672 7673 7674 7675
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
7676
	}
7677

7678 7679
	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
7680
	 */
7681 7682 7683 7684 7685 7686 7687 7688
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}
7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711

	return true;
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

7712
	if (hdev->discovery.result_filtering) {
7713 7714 7715 7716 7717 7718 7719 7720 7721 7722
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7723 7724
		return;

7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755
	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

7756 7757
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7758

7759
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7760
}
7761

7762 7763
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7764
{
7765 7766 7767
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;
7768

7769
	ev = (struct mgmt_ev_device_found *) buf;
7770

7771 7772 7773
	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
7774
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7775 7776 7777
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7778
				  name_len);
7779

7780
	ev->eir_len = cpu_to_le16(eir_len);
7781

7782
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7783
}
7784

7785
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7786
{
7787
	struct mgmt_ev_discovering ev;
7788

7789 7790
	BT_DBG("%s discovering %u", hdev->name, discovering);

7791 7792 7793 7794
	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

7795
	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7796
}
7797

7798
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7799 7800 7801 7802 7803 7804 7805 7806
{
	BT_DBG("%s status %u", hdev->name, status);
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

7807
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
7808 7809 7810 7811
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
7812
	hci_req_run(&req, adv_enable_complete);
7813
}
7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}