/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	4

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

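/* Convert a raw HCI status code to its mgmt counterpart using the table
 * above. Codes outside the table are reported as MGMT_STATUS_FAILED.
 */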
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

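/* Queue an MGMT_EV_CMD_STATUS event on the socket that issued the command. */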
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

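/* Queue an MGMT_EV_CMD_COMPLETE event, including any return parameters,
 * on the socket that issued the command.
 */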
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

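/* Reply with the lists of supported mgmt command and event opcodes. */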
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

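/* Reply with the indexes of all registered BR/EDR controllers, skipping
 * controllers that are still being set up or are in use by a user channel.
 */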
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

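/* Compute the settings mask that this controller could support, based
 * only on its LMP features and HCI version.
 */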
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

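/* Append an EIR_UUID16_ALL/EIR_UUID16_SOME field for the registered
 * 16-bit UUIDs, skipping reserved values below 0x1100 and the PnP
 * Information service class. Returns the position after the last byte
 * written.
 */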
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

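/* Build LE scan response data carrying the local name, shortened if it
 * does not fit, and return the number of bytes used.
 */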
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

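/* Build LE advertising data consisting of the flags field and, when the
 * value is known, the advertising TX power. Returns the number of bytes
 * used.
 */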
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

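/* Queue a Write Extended Inquiry Response command, but only if the
 * controller is powered, supports EIR, has SSP enabled, the service
 * cache is not active and the generated data actually changed.
 */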
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

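/* Queue a Write Class of Device command built from the stored major and
 * minor class and the service class bits of the registered UUIDs. The
 * limited discoverable bit is added while that mode is active.
 */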
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

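/* Delayed work that turns the service cache off and flushes the
 * postponed EIR and class of device updates to the controller.
 */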
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

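/* Allocate a pending command entry, copy the request parameters, hold a
 * reference to the requesting socket and add the entry to the
 * controller's mgmt_pending list.
 */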
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

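/* Handler for MGMT_OP_SET_POWERED: queue the power on/off work and let
 * the pending command reply once the state change has completed.
 */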
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

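/* Send a mgmt event to every control socket except skip_sk. */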
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

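/* Handler for MGMT_OP_SET_DISCOVERABLE: update general or limited
 * discoverable mode and the discoverable timeout, writing the IAC list
 * and scan enable setting when the controller is powered.
 */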
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	hci_req_init(&req, hdev);

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 2;
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

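/* Switch between interlaced page scan with a 160 msec interval (fast
 * connectable) and standard page scan with the default 1.28 sec
 * interval, queuing commands only for values that actually change.
 */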
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

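/* Queue the LE advertising parameter and enable commands; the
 * advertising type depends on whether the controller is, or is about to
 * become, connectable.
 */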
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	if (bacmp(&hdev->bdaddr, BDADDR_ANY))
		cp.own_address_type = ADDR_LE_DEV_PUBLIC;
	else
		cp.own_address_type = ADDR_LE_DEV_RANDOM;
	cp.channel_map = 0x07;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;

		if (cp->val) {
			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		} else {
			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
						hdev);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);
2044

2045 2046 2047
	update_class(&req);
	update_eir(&req);

2048 2049 2050 2051
	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2052

2053
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2054
				   hdev->dev_class, 3);
2055 2056 2057 2058
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2059
	if (!cmd) {
2060
		err = -ENOMEM;
2061 2062 2063 2064
		goto unlock;
	}

	err = 0;
2065 2066

unlock:
2067
	hci_dev_unlock(hdev);
2068 2069 2070
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
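/* Illustrative note (not part of the original sources):
 * MGMT_OP_LOAD_LINK_KEYS is a variable-length command, so a payload
 * carrying e.g. two keys must satisfy
 *   len == sizeof(struct mgmt_cp_load_link_keys) +
 *          2 * sizeof(struct mgmt_link_key_info);
 * any other length is rejected above with MGMT_STATUS_INVALID_PARAMS.
 */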

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
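/* For example, link_to_bdaddr(LE_LINK, ADDR_LE_DEV_PUBLIC) yields
 * BDADDR_LE_PUBLIC, while any non-LE link type maps to BDADDR_BREDR.
 * (Descriptive comment only.)
 */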

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
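/* The reply handlers below are thin wrappers that feed the matching
 * mgmt and HCI opcodes (and, where applicable, the passkey) into
 * user_pairing_resp(). (Descriptive comment only.)
 */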

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_add_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
				      cp->randomizer);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
		else
			param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_stop_discovery_failed(hdev, status);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
	hci_dev_unlock(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);


	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);


		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->authenticated != 0x00 && key->authenticated != 0x01)
		return false;
	if (key->master != 0x00 && key->master != 0x01)
		return false;
	if (!bdaddr_type_is_le(key->addr.type))
		return false;
	return true;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
			    type, 0, key->authenticated, key->val,
			    key->enc_size, key->ediv, key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

4028
static const struct mgmt_handler {
4029 4030
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
4031 4032
	bool var_len;
	size_t data_len;
4033 4034
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
};

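/* Entry point for raw management commands received on an HCI control
 * socket. The header (opcode, controller index, parameter length) is
 * parsed and validated before the request is handed to the matching
 * handler from mgmt_handlers[].
 */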
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

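/* Notify listeners that a new BR/EDR controller index is available. */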
void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

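/* Build and run an HCI request that brings the controller settings
 * (SSP, LE host support, static address, advertising data, link
 * security, scan mode, class, name and EIR) in line with the current
 * mgmt flags once the adapter has been powered on.
 */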
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

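/* Called on power state transitions. On power-on the controller is
 * re-initialized via powered_update_hci(); on power-off all pending
 * commands are failed with MGMT_STATUS_NOT_POWERED and, if needed, a
 * zero class of device is reported before announcing the new settings.
 */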
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

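/* Fail a pending Set Powered command, mapping -ERFKILL to
 * MGMT_STATUS_RFKILLED and everything else to MGMT_STATUS_FAILED.
 */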
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	update_class(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

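/* Track a discoverable state change that did not originate from a
 * pending mgmt command and emit New Settings only if the
 * HCI_DISCOVERABLE flag actually changed.
 */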
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable)
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.authenticated = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

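/* Append one EIR field (length, type, data) to the buffer at eir and
 * return the new total length. The caller must ensure the buffer has
 * room for data_len + 2 additional bytes.
 */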
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

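/* Common completion helper for the user confirmation and passkey
 * (negative) reply commands: look up the pending command for opcode,
 * send the address back with the translated status and clean up.
 */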
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					     u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			     0, &rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);
}

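/* Forward an inquiry or LE scan result to user space as a Device Found
 * event. Results are dropped when no discovery is active or when the
 * EIR data plus an optional class of device field would not fit in the
 * 512 byte event buffer.
 */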
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}