/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	4

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	return settings;
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
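			/* The two-octet EIR field header goes in first: the
			 * length octet starts at 1 (it counts the type octet)
			 * and grows as 16-bit UUIDs are appended below.
			 */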
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
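	/* Use whatever room is left in the advertising data for the local
	 * name, shortening it if it does not fit.
	 */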
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	hci_req_init(&req, hdev);

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;
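		/* The inquiry access code LAPs below are written LSB first:
		 * 0x9e8b00 is the LIAC and 0x9e8b33 the GIAC.
		 */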

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 2;
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);
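	/* 0x0012 * 0.625 msec = 11.25 msec page scan window */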

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
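	/* A min/max interval of 0x0800 corresponds to 1.28 seconds
	 * (units of 0.625 msec).
	 */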
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	if (bacmp(&hdev->bdaddr, BDADDR_ANY))
		cp.own_address_type = ADDR_LE_DEV_PUBLIC;
	else
		cp.own_address_type = ADDR_LE_DEV_RANDOM;
	cp.channel_map = 0x07;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;

		if (cp->val) {
			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		} else {
			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
						hdev);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_ad(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
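/* UUIDs that share their last 96 bits with this Bluetooth Base UUID can
 * be compressed to a 16-bit or 32-bit short form; get_uuid_size() below
 * relies on that to pick the smallest representation.
 */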

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

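/* Notify all mgmt sockets except skip_sk that the given device has been
 * unpaired.
 */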
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

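/* Translate an HCI link type and address type into the single address
 * type value used by the mgmt interface.
 */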
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

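/* Send an HCI PIN Code Negative Reply for the given address and track it
 * as a pending mgmt command.
 */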
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

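/* Find the pending Pair Device command, if any, that owns this connection
 * (cmd->user_data is set to the hci_conn in pair_device).
 */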
static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

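/* Common handler for all user confirmation and passkey (negative)
 * replies: LE pairing responses are forwarded to SMP, while BR/EDR ones
 * are sent to the controller as the corresponding HCI command.
 */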
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

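/* Queue an HCI Write Local Name command carrying the currently stored
 * device name.
 */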
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	if (lmp_le_capable(hdev))
		update_ad(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_add_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
				      cp->randomizer);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
		else
			param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_stop_discovery_failed(hdev, status);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
	hci_dev_unlock(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

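/* The static address can only be configured while the controller is
 * powered off. A non-zero address must not be BDADDR_NONE and, matching
 * the LE static random address format, needs its two most significant
 * bits set.
 */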
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

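/* Sanity check the LE scan parameters: both the interval and the window
 * must lie in the range 0x0004-0x4000 (0.625 ms units at the HCI level)
 * and the window may not be larger than the interval.
 */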
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_ad
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	update_ad(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

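/* Basic validation of a long term key entry supplied by user space
 * before it is added to the SMP LTK list.
 */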
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->authenticated != 0x00 && key->authenticated != 0x01)
		return false;

	if (key->master != 0x00 && key->master != 0x01)
		return false;

	if (!bdaddr_type_is_le(key->addr.type))
		return false;

	return true;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
3942
		       len, expected_len);
3943
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
			    type, 0, key->authenticated, key->val,
			    key->enc_size, key->ediv, key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

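/* Command handler table indexed by mgmt opcode. data_len is the exact
 * expected parameter length, or the minimum length for variable-length
 * (var_len) commands.
 */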
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
};

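/* Entry point for all mgmt commands arriving on a control socket. The raw
 * message is copied from the iovec, the header is validated, the target
 * controller (if any) is looked up and the command is dispatched through
 * mgmt_handlers[]. Returns the number of bytes consumed or a negative
 * error.
 */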
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

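/* Announce a new controller index to user space. Only controllers with
 * dev_type HCI_BREDR are exposed over the management interface.
 */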
void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

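/* Bring the controller in line with the current mgmt settings once it has
 * been powered on: SSP, LE host support, static address, advertising data
 * and state, authentication, BR/EDR scan mode, class, name and EIR.
 */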
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			update_ad(&req);

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

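/* Called when the controller power state changes. On power on the settings
 * are pushed to the controller via powered_update_hci(); on power off all
 * pending commands are failed with "not powered" and a zero class of
 * device is reported before emitting New Settings.
 */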
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

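/* Fail a pending Set Powered command, mapping an rfkill block to its
 * dedicated status code.
 */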
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	update_class(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

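/* Update the HCI_DISCOVERABLE flag after a scan enable change that did not
 * originate from a mgmt command, and emit New Settings if it changed.
 */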
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable)
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}

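/* Forward a newly created BR/EDR link key to user space, together with a
 * hint whether it should be stored persistently.
 */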
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.authenticated = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
			  NULL);
}

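/* Append a single EIR field (length, type, data) to the buffer and return
 * the new total EIR length.
 */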
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

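/* Emit a Device Connected event, packing the remote name and class of
 * device (when known) as EIR data.
 */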
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

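/* Emit a Device Disconnected event and complete any pending Disconnect or
 * Unpair Device commands for this controller.
 */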
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

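/* Common helper to complete a pending user confirmation or passkey
 * (negative) reply command with the status of the HCI operation.
 */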
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

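/* Handle completion of an authentication enable change: sync the
 * HCI_LINK_SECURITY flag with the HCI_AUTH state, answer pending Set Link
 * Security commands and emit New Settings if the setting changed.
 */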
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

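/* Queue a Write Extended Inquiry Response command that clears the EIR
 * data, if the controller supports extended inquiry at all.
 */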
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

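/* Handle the result of a Write Simple Pairing Mode command: update the SSP
 * and HS flags, answer pending Set SSP commands and refresh or clear the
 * EIR data to match the new state.
 */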
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

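/* Called when a local name update completes. Unsolicited changes (no
 * pending Set Local Name command) update the stored name, but no event is
 * sent while the device is still being powered on.
 */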
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					    u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return -ENOENT;

	if (status) {
		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		err = cmd_complete(cmd->sk, hdev->id,
				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
				   sizeof(rp));
	}

	mgmt_pending_remove(cmd);

	return err;
}

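/* Report a device found during discovery. The class of device is appended
 * to the EIR data if the remote did not already provide one, and results
 * are dropped when no discovery is active.
 */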
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

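/* Report discovery state changes to user space and complete any pending
 * Start/Stop Discovery command with the discovery type.
 */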
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}

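/* Re-enable LE advertising if there are no LE connections left and the
 * Advertising setting is still on. If the HCI request cannot be issued,
 * the setting is cleared and user space is notified.
 */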
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails, we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}