/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	5

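/* Descriptive note (added comment): these two tables list the commands and
 * events that this management interface version reports as supported via
 * MGMT_OP_READ_COMMANDS.
 */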
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

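/* Descriptive note (added comment): from mgmt's point of view a controller
 * only counts as powered when HCI is up and the HCI_AUTO_OFF grace period
 * that follows controller setup has been cleared.
 */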
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

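/* Descriptive note (added comment): queue a Command Status event carrying the
 * given status back to the socket that issued the command.
 */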
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

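/* Descriptive note (added comment): queue a Command Complete event, copying
 * rp_len bytes of return parameters after the event header.
 */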
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

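/* Descriptive note (added comment): report the indexes of all registered
 * BR/EDR controllers, skipping devices still in setup or bound to a user
 * channel.
 */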
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	return settings;
}

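/* Descriptive note (added comment): the Device ID (PnP Information) service
 * class is advertised through the dedicated EIR_DEVICE_ID field, so it is
 * filtered out of the UUID lists built below.
 */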
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

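/* Descriptive note (added comment): build the LE scan response payload
 * (currently just the local name, shortened if it does not fit) and return
 * its length.
 */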
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}

static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

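/* Descriptive note (added comment): delayed work that ends the service cache
 * period; once it fires, the EIR data and class of device are brought back in
 * sync with the current UUID list.
 */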
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

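/* Descriptive note (added comment): allocate a pending command entry, copy
 * the request parameters and take a reference on the requesting socket.
 */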
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

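/* Descriptive note (added comment): broadcast a management event to all
 * control sockets except skip_sk.
 */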
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

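/* Descriptive note (added comment): queue page scan activity/type changes for
 * (non-)fast-connectable mode, but only when the values differ from what the
 * controller already uses.
 */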
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

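/* Descriptive note (added comment): queue the HCI commands that configure the
 * advertising parameters and then enable LE advertising.
 */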
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = hdev->own_addr_type;
	cp.channel_map = 0x07;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed)
		return new_settings(hdev, sk);

	return 0;
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2039 2040
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

2054
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2055
		       u16 len)
2056
{
2057
	struct mgmt_cp_remove_uuid *cp = data;
2058
	struct pending_cmd *cmd;
2059
	struct bt_uuid *match, *tmp;
2060
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2061
	struct hci_request req;
2062 2063
	int err, found;

2064
	BT_DBG("request for %s", hdev->name);
2065

2066
	hci_dev_lock(hdev);
2067

2068
	if (pending_eir_or_class(hdev)) {
2069
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2070
				 MGMT_STATUS_BUSY);
2071 2072 2073
		goto unlock;
	}

2074 2075
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);
2076

2077
		if (enable_service_cache(hdev)) {
2078
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2079
					   0, hdev->dev_class, 3);
2080 2081
			goto unlock;
		}
2082

2083
		goto update_class;
2084 2085 2086 2087
	}

	found = 0;

2088
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2089 2090 2091 2092
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
2093
		kfree(match);
2094 2095 2096 2097
		found++;
	}

	if (found == 0) {
2098
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2099
				 MGMT_STATUS_INVALID_PARAMS);
2100 2101 2102
		goto unlock;
	}

2103
update_class:
2104
	hci_req_init(&req, hdev);
2105

2106 2107 2108
	update_class(&req);
	update_eir(&req);

2109 2110 2111 2112
	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2113

2114
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2115
				   hdev->dev_class, 3);
2116 2117 2118 2119
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2120
	if (!cmd) {
2121
		err = -ENOMEM;
2122 2123 2124 2125
		goto unlock;
	}

	err = 0;
2126 2127

unlock:
2128
	hci_dev_unlock(hdev);
2129 2130 2131
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

2139
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2140
			 u16 len)
2141
{
2142
	struct mgmt_cp_set_dev_class *cp = data;
2143
	struct pending_cmd *cmd;
2144
	struct hci_request req;
2145 2146
	int err;

2147
	BT_DBG("request for %s", hdev->name);
2148

2149
	if (!lmp_bredr_capable(hdev))
2150 2151
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);
2152

2153
	hci_dev_lock(hdev);
2154

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}
2160

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}
2166

2167 2168 2169
	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

2170
	if (!hdev_is_powered(hdev)) {
2171
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2172
				   hdev->dev_class, 3);
2173 2174 2175
		goto unlock;
	}

2176 2177
	hci_req_init(&req, hdev);

2178
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2179 2180 2181
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
2182
		update_eir(&req);
2183
	}
2184

2185 2186
	update_class(&req);

2187 2188 2189 2190
	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;
2191

2192
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2193
				   hdev->dev_class, 3);
2194 2195 2196 2197
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2198
	if (!cmd) {
2199
		err = -ENOMEM;
2200 2201 2202 2203
		goto unlock;
	}

	err = 0;
2204

2205
unlock:
2206
	hci_dev_unlock(hdev);
2207 2208 2209
	return err;
}

2210
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2211
			  u16 len)
2212
{
2213
	struct mgmt_cp_load_link_keys *cp = data;
2214
	u16 key_count, expected_len;
2215
	bool changed;
2216
	int i;
2217

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

2224
	key_count = __le16_to_cpu(cp->key_count);
2225

2226 2227
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
2228
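	/* The payload must carry exactly key_count mgmt_link_key_info
	 * entries after the fixed header; any other length is treated as a
	 * malformed request.
	 */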
	if (expected_len != len) {
2229
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2230
		       len, expected_len);
2231
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2232
				  MGMT_STATUS_INVALID_PARAMS);
2233 2234
	}

2235 2236 2237 2238
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

2239
	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2240
	       key_count);
2241

2242 2243 2244
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

2245
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2246 2247 2248 2249
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

2250
	hci_dev_lock(hdev);
2251 2252 2253 2254

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
2255
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2256
	else
2257 2258 2259 2260
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
2261

2262
	for (i = 0; i < key_count; i++) {
2263
		struct mgmt_link_key_info *key = &cp->keys[i];
2264

2265
		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2266
				 key->type, key->pin_len);
2267 2268
	}

2269
	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2270

2271
	hci_dev_unlock(hdev);
2272

2273
	return 0;
2274 2275
}

2276
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2277
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2285
			  skip_sk);
2286 2287
}

2288
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2289
			 u16 len)
2290
{
2291 2292
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
2293 2294
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
2295 2296 2297
	struct hci_conn *conn;
	int err;

2298
	memset(&rp, 0, sizeof(rp));
2299 2300
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
2301

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

2312 2313
	hci_dev_lock(hdev);

2314
	if (!hdev_is_powered(hdev)) {
2315
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2316
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2317 2318 2319
		goto unlock;
	}

2320
	if (cp->addr.type == BDADDR_BREDR)
2321 2322 2323
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2324

2325
	if (err < 0) {
2326
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2327
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2328 2329 2330
		goto unlock;
	}

2331
	if (cp->disconnect) {
2332
		if (cp->addr.type == BDADDR_BREDR)
2333
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2334
						       &cp->addr.bdaddr);
2335 2336
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2337
						       &cp->addr.bdaddr);
2338 2339 2340
	} else {
		conn = NULL;
	}
2341

2342
	if (!conn) {
2343
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2344
				   &rp, sizeof(rp));
2345
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2346 2347
		goto unlock;
	}
2348

2349
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2350
			       sizeof(*cp));
2351 2352 2353
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
2354 2355
	}

2356
	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

2362
unlock:
2363
	hci_dev_unlock(hdev);
2364 2365 2366
	return err;
}

2367
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2368
		      u16 len)
2369
{
2370
	struct mgmt_cp_disconnect *cp = data;
2371
	struct mgmt_rp_disconnect rp;
2372
	struct hci_cp_disconnect dc;
2373
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

2379 2380 2381 2382
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

2383
	if (!bdaddr_type_is_valid(cp->addr.type))
2384 2385 2386
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));
2387

2388
	hci_dev_lock(hdev);
2389 2390

	if (!test_bit(HCI_UP, &hdev->flags)) {
2391 2392
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2393 2394 2395
		goto failed;
	}

2396
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2397 2398
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2399 2400 2401
		goto failed;
	}

2402
	if (cp->addr.type == BDADDR_BREDR)
2403 2404
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
2405 2406
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2407

2408
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2409 2410
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2411 2412 2413
		goto failed;
	}

2414
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2415 2416
	if (!cmd) {
		err = -ENOMEM;
2417
		goto failed;
2418
	}
2419

2420
	dc.handle = cpu_to_le16(conn->handle);
2421
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2422 2423 2424

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
2425
		mgmt_pending_remove(cmd);
2426 2427

failed:
2428
	hci_dev_unlock(hdev);
2429 2430 2431
	return err;
}

2432
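/* Translate an HCI link type plus LE address type into the single
 * address type byte used on the mgmt interface.  Unknown combinations
 * deliberately fall back to BR/EDR or LE Random instead of failing.
 */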
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2433 2434 2435
{
	switch (link_type) {
	case LE_LINK:
2436 2437
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
2438
			return BDADDR_LE_PUBLIC;
2439

2440
		default:
2441
			/* Fallback to LE Random address type */
2442
			return BDADDR_LE_RANDOM;
2443
		}
2444

2445
	default:
2446
		/* Fallback to BR/EDR type */
2447
		return BDADDR_BREDR;
2448 2449 2450
	}
}

2451 2452
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
2453 2454
{
	struct mgmt_rp_get_connections *rp;
2455
	struct hci_conn *c;
2456
	size_t rp_len;
2457 2458
	int err;
	u16 i;
2459 2460 2461

	BT_DBG("");

2462
	hci_dev_lock(hdev);
2463

2464
	if (!hdev_is_powered(hdev)) {
2465
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2466
				 MGMT_STATUS_NOT_POWERED);
2467 2468 2469
		goto unlock;
	}

2470
	i = 0;
2471 2472
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2473
			i++;
2474 2475
	}

2476
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2477
	rp = kmalloc(rp_len, GFP_KERNEL);
2478
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
2484
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2485 2486
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
2487
		bacpy(&rp->addr[i].bdaddr, &c->dst);
2488
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2489
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2490 2491 2492 2493
			continue;
		i++;
	}

2494
	rp->conn_count = cpu_to_le16(i);
2495

2496 2497
	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2498

2499
	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2500
			   rp_len);
2501

2502
	kfree(rp);
2503 2504

unlock:
2505
	hci_dev_unlock(hdev);
2506 2507 2508
	return err;
}

2509
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2510
				   struct mgmt_cp_pin_code_neg_reply *cp)
2511 2512 2513 2514
{
	struct pending_cmd *cmd;
	int err;

2515
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2516
			       sizeof(*cp));
2517 2518 2519
	if (!cmd)
		return -ENOMEM;

2520
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2521
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

2528
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2529
			  u16 len)
2530
{
2531
	struct hci_conn *conn;
2532
	struct mgmt_cp_pin_code_reply *cp = data;
2533
	struct hci_cp_pin_code_reply reply;
2534
	struct pending_cmd *cmd;
2535 2536 2537 2538
	int err;

	BT_DBG("");

2539
	hci_dev_lock(hdev);
2540

2541
	if (!hdev_is_powered(hdev)) {
2542
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2543
				 MGMT_STATUS_NOT_POWERED);
2544 2545 2546
		goto failed;
	}

2547
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2548
	if (!conn) {
2549
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2550
				 MGMT_STATUS_NOT_CONNECTED);
2551 2552 2553 2554
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2555 2556 2557
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2558 2559 2560

		BT_ERR("PIN code is not 16 bytes long");

2561
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2562
		if (err >= 0)
2563
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2564
					 MGMT_STATUS_INVALID_PARAMS);
2565 2566 2567 2568

		goto failed;
	}

2569
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2570 2571
	if (!cmd) {
		err = -ENOMEM;
2572
		goto failed;
2573
	}
2574

2575
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2576
	reply.pin_len = cp->pin_len;
2577
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2578 2579 2580

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
2581
		mgmt_pending_remove(cmd);
2582 2583

failed:
2584
	hci_dev_unlock(hdev);
2585 2586 2587
	return err;
}

2588 2589
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
2590
{
2591
	struct mgmt_cp_set_io_capability *cp = data;
2592 2593 2594

	BT_DBG("");

2595
	hci_dev_lock(hdev);
2596 2597 2598 2599

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2600
	       hdev->io_capability);
2601

2602
	hci_dev_unlock(hdev);
2603

2604 2605
	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
2606 2607
}

2608
static struct pending_cmd *find_pairing(struct hci_conn *conn)
2609 2610
{
	struct hci_dev *hdev = conn->hdev;
2611
	struct pending_cmd *cmd;
2612

2613
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

2631
	bacpy(&rp.addr.bdaddr, &conn->dst);
2632
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2633

2634
	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2635
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

2642
	hci_conn_drop(conn);
2643

2644
	mgmt_pending_remove(cmd);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
2654
	if (!cmd)
2655
		BT_DBG("Unable to find a pending command");
2656
	else
2657
		pairing_complete(cmd, mgmt_status(status));
2658 2659
}

static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

2676
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2677
		       u16 len)
2678
{
2679
	struct mgmt_cp_pair_device *cp = data;
2680
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

2688 2689 2690 2691
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

2697
	hci_dev_lock(hdev);
2698

2699
	if (!hdev_is_powered(hdev)) {
2700 2701
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2702 2703 2704
		goto unlock;
	}

2705 2706
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
2707
		auth_type = HCI_AT_DEDICATED_BONDING;
2708
	else
2709 2710
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

2711
	if (cp->addr.type == BDADDR_BREDR)
2712 2713
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
2714
	else
2715 2716
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
2717

2718
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

2726
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2727
				   status, &rp,
2728
				   sizeof(rp));
2729 2730 2731 2732
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
2733
		hci_conn_drop(conn);
2734
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2735
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2736 2737 2738
		goto unlock;
	}

2739
	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2740 2741
	if (!cmd) {
		err = -ENOMEM;
2742
		hci_conn_drop(conn);
2743 2744 2745
		goto unlock;
	}

2746
	/* For LE, just connecting isn't a proof that the pairing finished */
2747
	if (cp->addr.type == BDADDR_BREDR)
2748
		conn->connect_cfm_cb = pairing_complete_cb;
2749 2750
	else
		conn->connect_cfm_cb = le_connect_complete_cb;
2751

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
2758
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
2764
	hci_dev_unlock(hdev);
2765 2766 2767
	return err;
}

2768 2769
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2770
{
2771
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

2780
	if (!hdev_is_powered(hdev)) {
2781
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2782
				 MGMT_STATUS_NOT_POWERED);
2783 2784 2785
		goto unlock;
	}

2786 2787
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
2788
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2789
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2796
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2797
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

2803
	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2804
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

2810
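/* Common helper behind the PIN code, user confirmation and passkey
 * reply commands.  For LE links the response is routed through SMP via
 * smp_user_confirm_reply(), while for BR/EDR the corresponding HCI
 * command (hci_op) is sent, with the passkey attached when needed.
 */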
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2811
			     struct mgmt_addr_info *addr, u16 mgmt_op,
2812
			     u16 hci_op, __le32 passkey)
2813 2814
{
	struct pending_cmd *cmd;
2815
	struct hci_conn *conn;
2816 2817
	int err;

2818
	hci_dev_lock(hdev);
2819

2820
	if (!hdev_is_powered(hdev)) {
2821 2822 2823
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
2824
		goto done;
2825 2826
	}

2827 2828
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2829
	else
2830
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2831 2832

	if (!conn) {
2833 2834 2835
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
2836 2837
		goto done;
	}
2838

2839
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2840
		/* Continue with pairing via SMP */
2841 2842 2843
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
2844 2845 2846
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
2847
		else
2848 2849 2850
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));
2851 2852 2853 2854

		goto done;
	}

2855
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2856 2857
	if (!cmd) {
		err = -ENOMEM;
2858
		goto done;
2859 2860
	}

2861
	/* Continue with pairing via HCI */
2862 2863 2864
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

2865
		bacpy(&cp.bdaddr, &addr->bdaddr);
2866 2867 2868
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
2869 2870
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);
2871

2872 2873
	if (err < 0)
		mgmt_pending_remove(cmd);
2874

2875
done:
2876
	hci_dev_unlock(hdev);
2877 2878 2879
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

2887
	return user_pairing_resp(sk, hdev, &cp->addr,
2888 2889 2890 2891
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

2892 2893
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2894
{
2895
	struct mgmt_cp_user_confirm_reply *cp = data;
2896 2897 2898 2899

	BT_DBG("");

	if (len != sizeof(*cp))
2900
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2901
				  MGMT_STATUS_INVALID_PARAMS);
2902

2903
	return user_pairing_resp(sk, hdev, &cp->addr,
2904 2905
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
2906 2907
}

2908
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2909
				  void *data, u16 len)
2910
{
2911
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2912 2913 2914

	BT_DBG("");

2915
	return user_pairing_resp(sk, hdev, &cp->addr,
2916 2917
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2918 2919
}

2920 2921
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
2922
{
2923
	struct mgmt_cp_user_passkey_reply *cp = data;
2924 2925 2926

	BT_DBG("");

2927
	return user_pairing_resp(sk, hdev, &cp->addr,
2928 2929
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2930 2931
}

2932
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2933
				  void *data, u16 len)
2934
{
2935
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
2936 2937 2938

	BT_DBG("");

2939
	return user_pairing_resp(sk, hdev, &cp->addr,
2940 2941
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2942 2943
}

2944
static void update_name(struct hci_request *req)
2945
{
2946
	struct hci_dev *hdev = req->hdev;
2947 2948
	struct hci_cp_write_local_name cp;

2949
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2950

2951
	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2952 2953
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

2982
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2983
			  u16 len)
2984
{
2985
	struct mgmt_cp_set_local_name *cp = data;
2986
	struct pending_cmd *cmd;
2987
	struct hci_request req;
2988 2989 2990 2991
	int err;

	BT_DBG("");

2992
	hci_dev_lock(hdev);
2993

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

3005
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3006

3007
	if (!hdev_is_powered(hdev)) {
3008
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3009 3010

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3011
				   data, len);
3012 3013 3014 3015
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3016
				 sk);
3017

3018 3019 3020
		goto failed;
	}

3021
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

3027 3028
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

3029
	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

3036 3037 3038
	/* The name is stored in the scan response data and so there
	 * is no need to update the advertising data here.
	 */
3039
	if (lmp_le_capable(hdev))
3040
		update_scan_rsp_data(&req);
3041

3042
	err = hci_req_run(&req, set_name_complete);
3043 3044 3045 3046
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
3047
	hci_dev_unlock(hdev);
3048 3049 3050
	return err;
}

3051
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3052
			       void *data, u16 data_len)
3053 3054 3055 3056
{
	struct pending_cmd *cmd;
	int err;

3057
	BT_DBG("%s", hdev->name);
3058

3059
	hci_dev_lock(hdev);
3060

3061
	if (!hdev_is_powered(hdev)) {
3062
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3063
				 MGMT_STATUS_NOT_POWERED);
3064 3065 3066
		goto unlock;
	}

3067
	if (!lmp_ssp_capable(hdev)) {
3068
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3069
				 MGMT_STATUS_NOT_SUPPORTED);
3070 3071 3072
		goto unlock;
	}

3073
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3074
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3075
				 MGMT_STATUS_BUSY);
3076 3077 3078
		goto unlock;
	}

3079
	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

3091 3092 3093 3094
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
3095
	hci_dev_unlock(hdev);
3096 3097 3098
	return err;
}

3099
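/* Add Remote OOB Data comes in two valid sizes: the original form with
 * a single hash/randomizer pair (P-192) and the extended form used with
 * Secure Connections that carries both P-192 and P-256 values.  Any
 * other length is rejected as invalid parameters.
 */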
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3100
			       void *data, u16 len)
3101 3102 3103
{
	int err;

3104
	BT_DBG("%s ", hdev->name);
3105

3106
	hci_dev_lock(hdev);
3107

3108 3109 3110
	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;
3111

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}
3142

3143
	hci_dev_unlock(hdev);
3144 3145 3146
	return err;
}

3147
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3148
				  void *data, u16 len)
3149
{
3150
	struct mgmt_cp_remove_remote_oob_data *cp = data;
3151
	u8 status;
3152 3153
	int err;

3154
	BT_DBG("%s", hdev->name);
3155

3156
	hci_dev_lock(hdev);
3157

3158
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3159
	if (err < 0)
3160
		status = MGMT_STATUS_INVALID_PARAMS;
3161
	else
3162
		status = MGMT_STATUS_SUCCESS;
3163

3164
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3165
			   status, &cp->addr, sizeof(cp->addr));
3166

3167
	hci_dev_unlock(hdev);
3168 3169 3170
	return err;
}

static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3210
				   DISCOV_LE_TIMEOUT);
3211 3212 3213 3214
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3215
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}

3226
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3227
			   void *data, u16 len)
3228
{
3229
	struct mgmt_cp_start_discovery *cp = data;
3230
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3237
	u8 status;
3238 3239
	int err;

3240
	BT_DBG("%s", hdev->name);
3241

3242
	hci_dev_lock(hdev);
3243

3244
	if (!hdev_is_powered(hdev)) {
3245
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3246
				 MGMT_STATUS_NOT_POWERED);
3247 3248 3249
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

3256
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
3257
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3258
				 MGMT_STATUS_BUSY);
3259 3260 3261
		goto failed;
	}

3262
	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

3270 3271
	hci_req_init(&req, hdev);
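	/* The request built below depends on the discovery type: BR/EDR
	 * uses a classic HCI Inquiry, while LE and interleaved discovery
	 * program the LE scan parameters and enable active scanning.
	 */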

	switch (hdev->discovery.type) {
3273
	case DISCOV_TYPE_BREDR:
3274 3275
		status = mgmt_bredr_support(hdev);
		if (status) {
3276
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3277
					 status);
3278 3279 3280 3281
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3293
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3294
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3295 3296 3297
		break;

	case DISCOV_TYPE_LE:
3298
	case DISCOV_TYPE_INTERLEAVED:
3299 3300
		status = mgmt_le_support(hdev);
		if (status) {
3301
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3302
					 status);
3303 3304 3305 3306
			mgmt_pending_remove(cmd);
			goto failed;
		}

3307
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3308
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

3315
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
3331 3332
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3333
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
3342 3343
		break;

3344
	default:
3345 3346 3347 3348
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
3349
	}
3350

3351
	err = hci_req_run(&req, start_discovery_complete);
3352 3353
	if (err < 0)
		mgmt_pending_remove(cmd);
3354 3355
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3356 3357

failed:
3358
	hci_dev_unlock(hdev);
3359 3360 3361
	return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_stop_discovery_failed(hdev, status);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
	hci_dev_unlock(hdev);
}

3395
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3396
			  u16 len)
3397
{
3398
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
3399
	struct pending_cmd *cmd;
3400 3401
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
3402 3403
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
3404 3405
	int err;

3406
	BT_DBG("%s", hdev->name);
3407

3408
	hci_dev_lock(hdev);
3409

3410
	if (!hci_discovery_active(hdev)) {
3411
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3412 3413
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
3414 3415 3416 3417
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
3418
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3419 3420
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
3421
		goto unlock;
3422 3423
	}

3424
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3425 3426
	if (!cmd) {
		err = -ENOMEM;
3427 3428 3429
		goto unlock;
	}

3430 3431
	hci_req_init(&req, hdev);

3432 3433
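	/* What has to be cancelled depends on how far discovery got: while
	 * still finding devices either the inquiry or the LE scan is
	 * stopped, and while resolving names the outstanding remote name
	 * request is cancelled instead.
	 */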
	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}
3444

3445 3446 3447 3448
		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3449
						     NAME_PENDING);
3450
		if (!e) {
3451
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}
3459

3460
		bacpy(&cp.bdaddr, &e->data.bdaddr);
3461 3462
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
3474 3475
	}

3476
	err = hci_req_run(&req, stop_discovery_complete);
3477 3478
	if (err < 0)
		mgmt_pending_remove(cmd);
3479 3480
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3481

3482
unlock:
3483
	hci_dev_unlock(hdev);
3484 3485 3486
	return err;
}

3487
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3488
			u16 len)
3489
{
3490
	struct mgmt_cp_confirm_name *cp = data;
3491 3492 3493
	struct inquiry_entry *e;
	int err;

3494
	BT_DBG("%s", hdev->name);
3495 3496 3497

	hci_dev_lock(hdev);

3498
	if (!hci_discovery_active(hdev)) {
3499
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3500
				 MGMT_STATUS_FAILED);
3501 3502 3503
		goto failed;
	}

3504
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3505
	if (!e) {
3506
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3507
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
3516
		hci_inquiry_cache_update_resolve(hdev, e);
3517 3518
	}

3519 3520
	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

3527
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3528
			u16 len)
3529
{
3530
	struct mgmt_cp_block_device *cp = data;
3531
	u8 status;
3532 3533
	int err;

3534
	BT_DBG("%s", hdev->name);
3535

3536
	if (!bdaddr_type_is_valid(cp->addr.type))
3537 3538 3539
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));
3540

3541
	hci_dev_lock(hdev);
3542

3543
	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3544
	if (err < 0)
3545
		status = MGMT_STATUS_FAILED;
3546
	else
3547
		status = MGMT_STATUS_SUCCESS;
3548

3549
	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3550
			   &cp->addr, sizeof(cp->addr));
3551

3552
	hci_dev_unlock(hdev);
3553 3554 3555 3556

	return err;
}

3557
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3558
			  u16 len)
3559
{
3560
	struct mgmt_cp_unblock_device *cp = data;
3561
	u8 status;
3562 3563
	int err;

3564
	BT_DBG("%s", hdev->name);
3565

3566
	if (!bdaddr_type_is_valid(cp->addr.type))
3567 3568 3569
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));
3570

3571
	hci_dev_lock(hdev);
3572

3573
	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3574
	if (err < 0)
3575
		status = MGMT_STATUS_INVALID_PARAMS;
3576
	else
3577
		status = MGMT_STATUS_SUCCESS;
3578

3579
	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3580
			   &cp->addr, sizeof(cp->addr));
3581

3582
	hci_dev_unlock(hdev);
3583 3584 3585 3586

	return err;
}

3587 3588 3589 3590
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
3591
	struct hci_request req;
3592
	int err;
3593
	__u16 source;
3594 3595 3596

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);
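	/* Only source values 0x0000-0x0002 are accepted; per the Device ID
	 * specification these are assumed to mean disabled, Bluetooth SIG
	 * assigned and USB Implementer's Forum assigned respectively.
	 */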

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

3603 3604
	hci_dev_lock(hdev);

3605
	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

3612 3613 3614
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

3642 3643
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
3644 3645 3646 3647
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
3648
	u8 val, enabled, status;
3649 3650 3651 3652
	int err;

	BT_DBG("request for %s", hdev->name);

3653 3654
	status = mgmt_le_support(hdev);
	if (status)
3655
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3656
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
3665
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3666

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
3673
	    hci_conn_num(hdev, LE_LINK) > 0) {
3674 3675
		bool changed = false;

3676 3677
		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

3706 3707 3708 3709
	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

3728
	if (!lmp_le_capable(hdev))
3729
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3730
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}
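	/* As an illustration, a static address such as c0:11:22:33:44:55
	 * passes the check above (top two bits of the most significant
	 * byte set), while 40:11:22:33:44:55 would be rejected.
	 */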

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);
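	/* Both parameters are in units of 0.625 ms, so the accepted range
	 * of 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s.  The scan
	 * window must also not exceed the scan interval, which is checked
	 * next.
	 */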

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

3834
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3835
				void *data, u16 len)
3836
{
3837
	struct mgmt_mode *cp = data;
3838 3839
	struct pending_cmd *cmd;
	struct hci_request req;
3840 3841
	int err;

3842
	BT_DBG("%s", hdev->name);
3843

3844 3845
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
3846 3847 3848
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

3849 3850 3851 3852
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

3853
	if (!hdev_is_powered(hdev))
3854
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3855
				  MGMT_STATUS_NOT_POWERED);
3856 3857

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3858
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3859
				  MGMT_STATUS_REJECTED);
3860 3861 3862

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
3880 3881
	}

3882 3883
	hci_req_init(&req, hdev);

3884
	write_fast_connectable(&req, cp->val);
3885 3886

	err = hci_req_run(&req, fast_connectable_complete);
3887
	if (err < 0) {
3888
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3889
				 MGMT_STATUS_FAILED);
3890
		mgmt_pending_remove(cmd);
3891 3892
	}

3893
unlock:
3894
	hci_dev_unlock(hdev);
3895

3896 3897 3898
	return err;
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

4017
	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);
4023 4024 4025 4026

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

4027 4028 4029
	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
4030
	update_adv_data(&req);
4031

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
4046
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}

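/* Handler for MGMT_OP_SET_DEBUG_KEYS. This only toggles the
 * HCI_DEBUG_KEYS flag; no HCI command is sent to the controller.
 */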
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

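/* Sanity checks for a single LTK coming from user space: the master
 * value must be 0x00 or 0x01 and random addresses must be static,
 * i.e. have the two most significant bits set.
 */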
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

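/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS. The existing LTK list is
 * cleared and replaced by the (validated) keys supplied by user space.
 */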
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
			    type, 0, key->type, key->val,
			    key->enc_size, key->ediv, key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

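/* Command handler table, indexed by mgmt opcode. Fixed-size commands
 * must match data_len exactly, variable-length commands must carry at
 * least data_len bytes of parameters.
 */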
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
};


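/* Entry point for mgmt commands received on a control socket: validate
 * the header, look up the target controller (if any) and dispatch the
 * command through mgmt_handlers[].
 */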
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

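/* Build the HCI request that brings a freshly powered controller in
 * sync with the current mgmt settings (SSP, LE host support, static
 * address, advertising, link security, scan mode, class, name and EIR).
 */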
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

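/* Notify mgmt of a power state change. When powering on, the controller
 * is first brought in sync via powered_update_hci(); if that request is
 * sent successfully, notification is left to powered_complete().
 */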
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

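/* Called when the discoverable timeout expires: clear the discoverable
 * flags, fall back to page scan only and refresh the class of device
 * and advertising data.
 */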
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}

void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}

void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

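/* Append one EIR field (length, type, data) to the buffer and return
 * the new total EIR length.
 */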
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

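/* Emit MGMT_EV_DEVICE_CONNECTED, carrying the remote name and class of
 * device as EIR fields when they are known.
 */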
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

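/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */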
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

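/* Common helper for the user confirmation and passkey reply completions
 * below: find the pending command for @opcode and send its response.
 */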
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

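/* Complete a pending Read Local OOB Data command. When Secure
 * Connections is enabled and P-256 data is available, the extended
 * response carrying both P-192 and P-256 values is returned.
 */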
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}

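/* Emit MGMT_EV_DEVICE_FOUND for a discovery result. Events are only
 * sent while discovery is active and are dropped when the EIR data
 * would overflow the event buffer.
 */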
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

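/* Re-enable advertising if no LE connections remain and the
 * advertising setting is still set. If the request fails, the setting
 * is cleared and user space is notified (see adv_enable_complete()).
 */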
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}