/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	4

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

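/* Queue a Command Status event with the given status code back to the
 * socket that issued the mgmt command.
 */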
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

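/* Queue a Command Complete event back to the requesting socket, with
 * rp_len bytes of return parameters appended when rp is given.
 */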
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

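/* List the identifiers of all registered BR/EDR controllers, skipping
 * devices that are still in setup or bound to a user channel.
 */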
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

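/* Settings the controller could support based on its LMP features, as
 * opposed to the settings currently enabled (see get_current_settings).
 */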
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

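/* The next three helpers encode the registered service UUIDs as EIR
 * UUID16/UUID32/UUID128 fields, downgrading the field from "All" to
 * "Some" when the remaining buffer space runs out.
 */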
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

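/* Scan response data currently carries only the local name, shortened
 * when it does not fit in the remaining space.
 */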
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

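/* Advertising data consists of the flags field (general discoverable
 * and BR/EDR bits) plus the TX power when the controller reports a
 * valid value.
 */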
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

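/* Assemble the extended inquiry response: local name, inquiry TX power,
 * Device ID record and the service class UUID lists.
 */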
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

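/* Send an updated EIR to the controller, but only when it is powered,
 * EIR capable, has SSP enabled, the service cache is not active and the
 * data actually changed.
 */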
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

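/* Delayed work that turns the service cache off again and writes out
 * the EIR and class of device accumulated while it was active.
 */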
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

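/* Called on the first mgmt command for a controller to switch it into
 * mgmt mode.
 */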
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

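/* Track a command that will be answered asynchronously; the request
 * parameters are copied so the completion handler can refer to them.
 */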
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

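/* Set Powered: the actual power change runs from hdev->req_workqueue,
 * so the command stays pending until that work completes.
 */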
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

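/* Broadcast an mgmt event to all control sockets except skip_sk. */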
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

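/* Helpers for commands that need BR/EDR or LE: distinguish a controller
 * that lacks the feature from one where it is merely disabled.
 */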
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	hci_req_init(&req, hdev);

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 2;
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

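/* Fast connectable selects interlaced page scan with a shorter interval;
 * commands are only queued when the stored parameters differ.
 */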
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

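/* Program the advertising parameters (the advertising type depends on
 * the connectable setting) and enable advertising.
 */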
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = hdev->own_addr_type;
	cp.channel_map = 0x07;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;

		if (cp->val) {
			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		} else {
			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
						hdev);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

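/* UUIDs based on the Bluetooth base UUID can be stored as 16 or 32 bit
 * values; anything else needs the full 128 bits.
 */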
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2049

2050
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2051
				   hdev->dev_class, 3);
2052 2053 2054 2055
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2056
	if (!cmd) {
2057
		err = -ENOMEM;
2058 2059 2060 2061
		goto unlock;
	}

	err = 0;
2062 2063

unlock:
2064
	hci_dev_unlock(hdev);
2065 2066 2067
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

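/* MGMT_OP_SET_DEV_CLASS handler: validates and stores the major/minor
 * device class and pushes the new class of device to the controller
 * when it is powered.
 */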
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

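/* MGMT_OP_LOAD_LINK_KEYS handler: replaces the stored BR/EDR link keys
 * with the list supplied by user space after validating its length,
 * debug-key flag and address types.
 */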
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

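/* MGMT_OP_UNPAIR_DEVICE handler: drops the stored link key or LTK for
 * the given address and optionally disconnects the device before
 * replying.
 */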
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

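/* MGMT_OP_DISCONNECT handler: looks up the ACL or LE connection for the
 * given address and issues an HCI Disconnect with reason "remote user
 * terminated connection".
 */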
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

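/* MGMT_OP_GET_CONNECTIONS handler: returns the list of currently
 * connected devices, skipping SCO/eSCO links.
 */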
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

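/* MGMT_OP_PAIR_DEVICE handler: initiates an ACL or LE connection and
 * hooks the pairing callbacks so that the command completes once
 * authentication has finished.
 */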
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

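/* Common helper for the user confirmation/passkey replies: routes LE
 * responses through SMP and BR/EDR responses through the corresponding
 * HCI command.
 */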
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

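/* MGMT_OP_SET_LOCAL_NAME handler: updates the stored short and complete
 * names and, when powered, refreshes the controller name, EIR data and
 * LE scan response data.
 */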
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_add_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
				      cp->randomizer);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}

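/* MGMT_OP_START_DISCOVERY handler: kicks off BR/EDR inquiry, LE scanning
 * or interleaved discovery depending on the requested type.
 */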
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_stop_discovery_failed(hdev, status);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
	hci_dev_unlock(hdev);
}

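/* MGMT_OP_STOP_DISCOVERY handler: cancels the ongoing inquiry, LE scan
 * or name resolution, depending on the current discovery state.
 */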
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

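/* MGMT_OP_SET_ADVERTISING handler: toggles LE advertising, replying
 * directly when the change can be recorded without talking to the
 * controller.
 */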
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->authenticated != 0x00 && key->authenticated != 0x01)
		return false;

	if (key->master != 0x00 && key->master != 0x01)
		return false;

	if (!bdaddr_type_is_le(key->addr.type))
		return false;

	return true;
}

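/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates the supplied list of
 * SMP long term keys and replaces the stored LTKs with it.
 */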
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
			    type, 0, key->authenticated, key->val,
			    key->enc_size, key->ediv, key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

4022
static const struct mgmt_handler {
4023 4024
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
4025 4026
	bool var_len;
	size_t data_len;
4027 4028
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
};


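/* Entry point for commands received on the HCI control socket: copy
 * the message, validate the header, resolve the controller index (if
 * any), check the opcode and parameter length against the dispatch
 * table and finally call the handler. Returns the number of consumed
 * bytes or a negative error.
 */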
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

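/* Announce a newly registered controller to user space. Only primary
 * BR/EDR controllers are exposed over the mgmt interface, so other
 * device types are skipped.
 */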
void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

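/* Build and run an HCI request that brings the controller in sync with
 * the current mgmt settings after power on: SSP mode, LE host support,
 * static address, advertising data, authentication, scan mode, class,
 * name and EIR. Completion is reported through powered_complete().
 */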
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

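/* Called when the controller power state changes. On power on most of
 * the work is delegated to powered_update_hci(); on power off all
 * pending commands are failed with "not powered", a zeroed class of
 * device is signalled if necessary and New Settings is sent.
 */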
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	update_class(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

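/* Sync the HCI_DISCOVERABLE flag with a scan mode change that was not
 * triggered by a pending mgmt command and emit New Settings if the
 * value actually changed.
 */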
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable)
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.authenticated = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

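/* Append one EIR field to a buffer: length byte (type plus data),
 * type byte and the data itself. Returns the updated total length so
 * calls can be chained.
 */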
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

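/* Send a Device Connected event, appending the remote name and class
 * of device (when available) as EIR fields after the fixed part of
 * the event.
 */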
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

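/* Send a Device Disconnected event and complete any pending Disconnect
 * or Unpair Device commands waiting for this link to go down. The
 * socket that issued the Disconnect command is excluded from the event
 * broadcast since it already got a command response.
 */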
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

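/* Common completion helper for the user confirm/passkey reply
 * commands: find the pending command matching the given opcode,
 * complete it with the translated HCI status and remove it from the
 * pending list.
 */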
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

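/* Completion handler for the authentication enable change requested by
 * Set Link Security: propagate errors to pending commands, otherwise
 * sync HCI_LINK_SECURITY with the controller state and emit New
 * Settings if it changed.
 */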
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

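/* Completion handler for a Simple Pairing mode change: on error roll
 * back the SSP (and dependent High Speed) flags, otherwise sync the
 * flags, answer pending Set SSP commands, emit New Settings when
 * something changed and update or clear the EIR data to match.
 */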
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					     u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			     0, &rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);
}

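/* Report a discovered remote device to user space. The event is only
 * sent while discovery is active; the supplied EIR data is copied
 * as-is and the class of device is appended as an extra EIR field if
 * not already present.
 */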
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}

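/* Try to re-enable LE advertising, typically after an LE connection
 * has gone away. Nothing is done while LE links are still up or while
 * the Advertising setting is off; if queuing the HCI request fails the
 * setting is cleared and user space is notified via New Settings.
 */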
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}