/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	5

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_LOAD_IRKS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

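/* Convert an HCI status code to its mgmt counterpart. Codes outside the
 * table above fall back to MGMT_STATUS_FAILED.
 */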
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

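/* Queue an MGMT_EV_CMD_STATUS event for the given command and status on
 * a single management socket.
 */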
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

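/* Queue an MGMT_EV_CMD_COMPLETE event, optionally carrying rp_len bytes
 * of return parameters, on a single management socket.
 */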
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

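/* Build the bitmask of settings this controller could support, derived
 * from its BR/EDR, SSP, Secure Connections and LE capabilities.
 */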
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

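/* Find a pending mgmt command with the given opcode for this controller. */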
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

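/* Fill in LE scan response data: currently just the local name, shortened
 * and tagged EIR_NAME_SHORT if it does not fit. Returns the number of
 * bytes written.
 */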
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}

static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

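/* Queue a Write Extended Inquiry Response command, but only if the
 * controller is powered, supports extended inquiry, has SSP enabled and
 * the freshly created EIR data actually differs from the stored copy.
 */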
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

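/* Queue a Write Class of Device command when the computed class (including
 * the limited discoverable bit) differs from the current one.
 */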
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

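/* Queue the HCI commands that set the advertising parameters (fixed
 * 0x0800 slot interval, i.e. 1.28 s) and enable advertising.
 */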
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = hdev->own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

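/* Delayed work that clears HCI_SERVICE_CACHE and flushes the now
 * up-to-date EIR data and class of device to the controller.
 */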
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

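/* Run on the first mgmt command for a controller: marks it as
 * mgmt-controlled and sets up the service cache work.
 */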
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them; for mgmt,
	 * however, we require user-space to explicitly enable
	 * it.
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

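/* Allocate a pending command, copy the request parameters and add it to
 * the controller's pending list.
 */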
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

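/* Reply to a settings-changing command with the current settings bitmask. */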
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

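/* Broadcast a mgmt event to every management socket except skip_sk. */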
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that the class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then there is no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might still
	 * be active and store the new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

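/* Queue page scan activity/type commands for either fast (interlaced,
 * 160 ms interval) or standard (1.28 s) connectable mode, skipping any
 * value the controller is already using.
 */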
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed)
		return new_settings(hdev, sk);

	return 0;
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled, ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

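/* Arm the service cache timer if the controller is powered and the cache
 * is not already active. Returns true if the timer was started.
 */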
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

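/* Send a Device Unpaired management event to all sockets except skip_sk. */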
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

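/* Unpair Device command handler: remove the stored link key (BR/EDR) or
 * IRK and LTK (LE) for the address and, if requested, disconnect any
 * existing connection.
 */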
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

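/* Map an HCI link type and address type to the corresponding mgmt
 * BDADDR_* address type.
 */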
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

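/* Find the pending Pair Device command associated with a connection. */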
static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd)
		pairing_complete(cmd, status);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

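/* Common handler for user confirmation and passkey (negative) replies.
 * LE responses are forwarded to SMP, BR/EDR ones to the controller via
 * the given HCI opcode.
 */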
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

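/* Queue a Write Local Name HCI command carrying the current device name. */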
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

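/* Read Local OOB Data command handler: request either the legacy or the
 * extended (Secure Connections) OOB data from the controller.
 */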
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}

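/* Start Discovery command handler: validate the requested discovery type
 * and start the corresponding BR/EDR inquiry or LE scan.
 */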
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_stop_discovery_failed(hdev, status);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
	hci_dev_unlock(hdev);
}

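/* Stop Discovery command handler: cancel the ongoing inquiry, LE scan or
 * pending remote name request depending on the discovery state.
 */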
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

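/* Confirm Name command handler: mark an inquiry cache entry as having a
 * known name or queue it for name resolution.
 */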
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions mean that we should not do any HCI
	 * communication but instead directly send a mgmt response to
	 * user space (after toggling the flag if necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

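/* Handler for MGMT_OP_SET_SECURE_CONN: toggles Secure Connections
 * support (0x00 = off, 0x01 = on, 0x02 = SC only mode).
 */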
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}

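/* Handler for MGMT_OP_SET_DEBUG_KEYS: toggles the HCI_DEBUG_KEYS flag
 * and notifies the new settings if the flag actually changed.
 */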
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

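/* Handler for MGMT_OP_LOAD_IRKS: validates and replaces the stored
 * identity resolving keys with the ones provided by user space and
 * enables RPA resolving.
 */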
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

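/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: validates and replaces the
 * stored SMP long term keys with the ones provided by user space.
 */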
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4304
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    key->type, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

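/* Table of command handlers, indexed by mgmt opcode. Entries marked
 * var_len accept parameters longer than data_len.
 */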
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};


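/* Entry point for management commands coming in on the HCI control
 * socket: validates the header, looks up the target controller and
 * dispatches to the matching handler.
 */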
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

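/* Bring the controller in sync with the current mgmt settings (SSP,
 * LE host support, static address, advertising, link security, scan
 * mode, class, name and EIR) after it has been powered on.
 */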
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

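/* Called when the controller power state changes: completes pending
 * Set Powered commands and emits the resulting New Settings event.
 */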
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, make sure the
	 * limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}

void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

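/* Send a New Long Term Key event, hinting user space whether the key
 * should be stored persistently.
 */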
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote identity
	 * is known, the long term keys are internally mapped to the
	 * identity address. So allow static random and public
	 * addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will also be loaded on the next boot of the system.
	 * More identity resolving keys means more time during
	 * scanning is needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

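/* Append a single { length, type, data } field to an EIR buffer and
 * return the new total length.
 */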
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}

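/* Send a Device Found event for a discovery result, resolving RPAs to
 * identity addresses when a matching IRK is known.
 */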
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

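/* Notify user space that the discovery state changed and complete any
 * pending Start/Stop Discovery command.
 */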
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}

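/* Re-enable advertising once the last LE connection has been dropped,
 * provided the advertising setting is still enabled.
 */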
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}