/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

28
#include <linux/jiffies.h>
L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39 40
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
41
#include <linux/workqueue.h>
L
Linus Torvalds 已提交
42
#include <linux/interrupt.h>
43
#include <linux/rfkill.h>
44
#include <linux/timer.h>
45
#include <linux/crypto.h>
L
Linus Torvalds 已提交
46 47
#include <net/sock.h>

A
Andrei Emeltchenko 已提交
48
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
49 50 51 52 53
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

54 55
/* Delay (ms) before an auto-powered-on controller is powered off again
 * if userspace has not claimed it. */
#define AUTO_OFF_TIMEOUT 2000

/* Deferred-work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

70
/* Forward a device event (up/down/register/...) to listening HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

77
/* Called when a command completes; wakes up a pending synchronous request.
 *
 * During the HCI_INIT phase, a completion that does not match the last
 * init command is either ignored or — for spontaneous resets seen on
 * some CSR controllers — answered by resending the last sent command.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent;
		struct sk_buff *skb;
		u16 opcode;

		/* NOTE(review): guard added — sent_cmd may be NULL if the
		 * event arrives with no command outstanding, and the code
		 * below dereferences it unconditionally.
		 */
		if (!hdev->sent_cmd)
			return;

		sent = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(sent->opcode);

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with the given (positive) errno. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
127
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps until hci_req_complete()
 * or hci_req_cancel() wakes us, the timeout elapses, or a signal arrives.
 * Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue before issuing the request so the
	 * wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

/* Serialized wrapper around __hci_request(); fails fast if the device
 * is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
190
	set_bit(HCI_RESET, &hdev->flags);
191
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
L
Linus Torvalds 已提交
192 193
}

194
static void bredr_init(struct hci_dev *hdev)
L
Linus Torvalds 已提交
195
{
196
	struct hci_cp_delete_stored_link_key cp;
197
	__le16 param;
198
	__u8 flt_type;
L
Linus Torvalds 已提交
199

200 201
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

L
Linus Torvalds 已提交
202 203 204
	/* Mandatory initialization */

	/* Reset */
205
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206 207
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208
	}
L
Linus Torvalds 已提交
209 210

	/* Read Local Supported Features */
211
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
L
Linus Torvalds 已提交
212

213
	/* Read Local Version */
214
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215

L
Linus Torvalds 已提交
216
	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
217
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
L
Linus Torvalds 已提交
218 219

	/* Read BD Address */
220 221 222 223 224 225 226
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
L
Linus Torvalds 已提交
227 228

	/* Read Voice Setting */
229
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
L
Linus Torvalds 已提交
230 231 232 233

	/* Optional initialization */

	/* Clear Event Filters */
234
	flt_type = HCI_FLT_CLEAR_ALL;
235
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
L
Linus Torvalds 已提交
236 237

	/* Connection accept timeout ~20 secs */
238
	param = cpu_to_le16(0x7d00);
239
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 241 242 243

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
L
Linus Torvalds 已提交
244 245
}

246 247
static void amp_init(struct hci_dev *hdev)
{
248 249
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290
	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

/* Request callback: run device initialization — driver-supplied special
 * commands first, then the type-specific (BR/EDR or AMP) sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

291 292 293 294 295 296 297 298
/* Request callback: LE-specific initialization. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

L
Linus Torvalds 已提交
299 300 301 302 303 304 305
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
306
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
L
Linus Torvalds 已提交
307 308 309 310 311 312 313 314 315
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
316
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
L
Linus Torvalds 已提交
317 318 319 320 321 322 323 324
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

325
	/* Encryption */
326
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
L
Linus Torvalds 已提交
327 328
}

329 330 331 332
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

333
	BT_DBG("%s %x", hdev->name, policy);
334 335 336 337 338

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

339
/* Get HCI device by index.
L
Linus Torvalds 已提交
340 341 342
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
343
	struct hci_dev *hdev = NULL, *d;
L
Linus Torvalds 已提交
344 345 346 347 348 349 350

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
351
	list_for_each_entry(d, &hci_dev_list, list) {
L
Linus Torvalds 已提交
352 353 354 355 356 357 358 359 360 361
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
362

363 364 365 366
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

A
Andre Guedes 已提交
367
	switch (discov->state) {
368
	case DISCOVERY_FINDING:
A
Andre Guedes 已提交
369
	case DISCOVERY_RESOLVING:
370 371
		return true;

A
Andre Guedes 已提交
372 373 374
	default:
		return false;
	}
375 376
}

377 378 379 380 381 382 383 384 385
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
386 387
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
388
		hdev->discovery.type = 0;
389 390 391
		break;
	case DISCOVERY_STARTING:
		break;
392
	case DISCOVERY_FINDING:
393 394
		mgmt_discovering(hdev, 1);
		break;
395 396
	case DISCOVERY_RESOLVING:
		break;
397 398 399 400 401 402 403
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

L
Linus Torvalds 已提交
404 405
static void inquiry_cache_flush(struct hci_dev *hdev)
{
406
	struct discovery_state *cache = &hdev->discovery;
407
	struct inquiry_entry *p, *n;
L
Linus Torvalds 已提交
408

409 410
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
411
		kfree(p);
L
Linus Torvalds 已提交
412
	}
413 414 415

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
L
Linus Torvalds 已提交
416 417 418 419
}

/* Find a cache entry by bdaddr across all discovered devices, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
434
						       bdaddr_t *bdaddr)
435
{
436
	struct discovery_state *cache = &hdev->discovery;
437 438 439 440 441
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
L
Linus Torvalds 已提交
442
		if (!bacmp(&e->data.bdaddr, bdaddr))
443 444 445 446
			return e;
	}

	return NULL;
L
Linus Torvalds 已提交
447 448
}

449
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
450 451
						       bdaddr_t *bdaddr,
						       int state)
452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

468
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
469
				      struct inquiry_entry *ie)
470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

487
/* Insert or refresh an inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name.
 * @ssp: out-param, set when the remote supports SSP (from this result or
 *       a previous one).
 * Returns true if the entry's name is known (no name request needed),
 * false if a remote-name lookup is still required or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name request is still needed:
		 * re-sort the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 * Caller must hold the hdev lock (must not sleep). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
585
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
L
Linus Torvalds 已提交
586 587 588 589 590 591 592 593 594 595 596 597 598 599
}

/* HCIINQUIRY ioctl: optionally (re)run an inquiry, then copy the cached
 * results back to userspace after the hci_inquiry_req header. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Re-inquire when the cache is stale or empty, or on request. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

/* ---- HCI ioctl helpers ---- */

/* Bring up the HCI device with the given index: open the transport,
 * run controller init (unless raw), and announce the device as up.
 * On init failure, tears everything back down. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

A
Andre Guedes 已提交
752 753
	cancel_work_sync(&hdev->le_scan);

L
Linus Torvalds 已提交
754 755 756 757
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
758
		del_timer_sync(&hdev->cmd_timer);
L
Linus Torvalds 已提交
759 760 761 762
		hci_req_unlock(hdev);
		return 0;
	}

763 764
	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
765
	flush_work(&hdev->rx_work);
L
Linus Torvalds 已提交
766

767
	if (hdev->discov_timeout > 0) {
768
		cancel_delayed_work(&hdev->discov_off);
769
		hdev->discov_timeout = 0;
770
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
771 772
	}

773
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
774 775
		cancel_delayed_work(&hdev->service_cache);

A
Andre Guedes 已提交
776 777
	cancel_delayed_work_sync(&hdev->le_scan_disable);

778
	hci_dev_lock(hdev);
L
Linus Torvalds 已提交
779 780
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
781
	hci_dev_unlock(hdev);
L
Linus Torvalds 已提交
782 783 784 785 786 787 788 789 790

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
791 792
	if (!test_bit(HCI_RAW, &hdev->flags) &&
				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
L
Linus Torvalds 已提交
793
		set_bit(HCI_INIT, &hdev->flags);
794
		__hci_request(hdev, hci_reset_req, 0,
795
					msecs_to_jiffies(250));
L
Linus Torvalds 已提交
796 797 798
		clear_bit(HCI_INIT, &hdev->flags);
	}

799 800
	/* flush cmd  work */
	flush_work(&hdev->cmd_work);
L
Linus Torvalds 已提交
801 802 803 804 805 806 807 808

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
809
		del_timer_sync(&hdev->cmd_timer);
L
Linus Torvalds 已提交
810 811 812 813 814 815 816 817
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

818 819 820 821 822
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}
823

L
Linus Torvalds 已提交
824 825 826
	/* Clear flags */
	hdev->flags = 0;

827
	memset(hdev->eir, 0, sizeof(hdev->eir));
828
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
829

L
Linus Torvalds 已提交
830 831 832 833 834 835 836 837 838 839 840
	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

/* Close the HCI device with the given index (ioctl entry point). */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* An explicit close supersedes any pending auto power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

/* HCIDEVRESET ioctl: drop queued traffic, flush connection and inquiry
 * state, and issue an HCI Reset (unless the device is raw). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

/* HCIDEVRESTAT ioctl: zero the device's traffic statistics. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

/* Dispatch the HCISET* ioctl family: each either runs a synchronous HCI
 * request or updates a local device setting directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low 16 bits, mtu in the high */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

/* HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs for all
 * registered devices to userspace. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumeration by a legacy tool keeps the device on. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot for one device. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Query by a legacy tool keeps the device on. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092
/* ---- Interface to HCI drivers ---- */

/* rfkill callback: power the device down when it becomes blocked.
 * Unblocking is a no-op; userspace must bring the device back up. */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

L
Linus Torvalds 已提交
1093 1094 1095 1096 1097
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

1098
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
L
Linus Torvalds 已提交
1099 1100 1101
	if (!hdev)
		return NULL;

1102
	hci_init_sysfs(hdev);
L
Linus Torvalds 已提交
1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

1119 1120 1121 1122 1123 1124 1125 1126 1127
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

1128
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1129
		schedule_delayed_work(&hdev->power_off,
1130
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1131

1132
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1133
		mgmt_index_added(hdev);
1134 1135 1136 1137
}

static void hci_power_off(struct work_struct *work)
{
1138 1139
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);
1140 1141 1142

	BT_DBG("%s", hdev->name);

1143
	hci_dev_do_close(hdev);
1144 1145
}

1146 1147 1148 1149 1150 1151 1152 1153 1154
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

1155
	hci_dev_lock(hdev);
1156 1157 1158 1159 1160

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

1161
	hci_dev_unlock(hdev);
1162 1163
}

1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

1208 1209
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
1210
	struct link_key *k;
1211

1212
	list_for_each_entry(k, &hdev->link_keys, list)
1213 1214 1215 1216 1217 1218
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

1219
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1220 1221 1222 1223
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
1224
		return true;
1225 1226 1227

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
1228
		return false;
1229 1230 1231

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1232
		return false;
1233 1234 1235

	/* Security mode 3 case */
	if (!conn)
1236
		return true;
1237 1238 1239

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1240
		return true;
1241 1242 1243

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1244
		return true;
1245 1246 1247

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1248
		return true;
1249 1250 1251

	/* If none of the above criteria match, then don't store the key
	 * persistently */
1252
	return false;
1253 1254
}

1255
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1256
{
1257
	struct smp_ltk *k;
1258

1259 1260 1261
	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
				memcmp(rand, k->rand, sizeof(k->rand)))
1262 1263
			continue;

1264
		return k;
1265 1266 1267 1268 1269 1270
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

1271
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272
				     u8 addr_type)
1273
{
1274
	struct smp_ltk *k;
1275

1276 1277 1278
	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
					bacmp(bdaddr, &k->bdaddr) == 0)
1279 1280 1281 1282
			return k;

	return NULL;
}
1283
EXPORT_SYMBOL(hci_find_ltk_by_addr);
1284

1285
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1286
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1287 1288
{
	struct link_key *key, *old_key;
1289 1290
	u8 old_key_type;
	bool persistent;
1291 1292 1293 1294 1295 1296

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
1297
		old_key_type = conn ? conn->key_type : 0xff;
1298 1299 1300 1301 1302 1303 1304 1305
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

1306 1307 1308 1309 1310
	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
1311
					old_key_type == 0xff) {
1312
		type = HCI_LK_COMBINATION;
1313 1314 1315
		if (conn)
			conn->key_type = type;
	}
1316

1317 1318 1319 1320
	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

1321
	if (type == HCI_LK_CHANGED_COMBINATION)
1322
		key->type = old_key_type;
1323 1324 1325
	else
		key->type = type;

1326 1327 1328 1329 1330
	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

1331
	mgmt_new_link_key(hdev, key, persistent);
1332

1333 1334
	if (conn)
		conn->flush_key = !persistent;
1335 1336 1337 1338

	return 0;
}

1339
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1340
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1341
		ediv, u8 rand[8])
1342
{
1343
	struct smp_ltk *key, *old_key;
1344

1345 1346
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;
1347

1348 1349
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
1350
		key = old_key;
1351 1352
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1353 1354
		if (!key)
			return -ENOMEM;
1355
		list_add(&key->list, &hdev->long_term_keys);
1356 1357 1358
	}

	bacpy(&key->bdaddr, bdaddr);
1359 1360 1361 1362 1363 1364 1365
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));
1366

1367 1368
	if (!new_key)
		return 0;
1369

1370 1371 1372
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

1373 1374 1375
	return 0;
}

1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

1409 1410 1411 1412 1413 1414 1415
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
1416
	queue_work(hdev->workqueue, &hdev->cmd_work);
1417 1418
}

1419
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1420
					  bdaddr_t *bdaddr)
1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

/* Delete the remote OOB data stored for bdaddr; -ENOENT when absent */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *entry = hci_find_remote_oob_data(hdev, bdaddr);

	if (!entry)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* Free every stored remote OOB data entry */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *entry, *next;

	list_for_each_entry_safe(entry, next, &hdev->remote_oob_data, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/* Store (or refresh) remote OOB pairing data (hash + randomizer) */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

1483
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1484
{
1485
	struct bdaddr_list *b;
1486

1487
	list_for_each_entry(b, &hdev->blacklist, list)
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

1510
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1511 1512 1513 1514 1515 1516
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

1517 1518
	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;
1519 1520

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1521 1522
	if (!entry)
		return -ENOMEM;
1523 1524 1525 1526 1527

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

1528
	return mgmt_device_blocked(hdev, bdaddr, type);
1529 1530
}

1531
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1532 1533 1534
{
	struct bdaddr_list *entry;

1535
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1536
		return hci_blacklist_clear(hdev);
1537 1538

	entry = hci_blacklist_lookup(hdev, bdaddr);
1539
	if (!entry)
1540
		return -ENOENT;
1541 1542 1543 1544

	list_del(&entry->list);
	kfree(entry);

1545
	return mgmt_device_unblocked(hdev, bdaddr, type);
1546 1547
}

1548
static void hci_clear_adv_cache(struct work_struct *work)
1549
{
1550
	struct hci_dev *hdev = container_of(work, struct hci_dev,
1551
					    adv_work.work);
1552 1553 1554 1555 1556 1557 1558 1559

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

A
Andre Guedes 已提交
1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

/* Look up a cached advertising entry by address; NULL when not cached */
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *e;

	list_for_each_entry(e, &hdev->adv_entries, list) {
		if (!bacmp(bdaddr, &e->bdaddr))
			return e;
	}

	return NULL;
}

/* Returns 1 for advertising event types a connection can be made to */
static inline int is_connectable_adv(u8 evt_type)
{
	return (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND) ? 1 : 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
1594
					struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
A
Andre Guedes 已提交
1595 1596 1597 1598 1599 1600 1601
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

1602
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
A
Andre Guedes 已提交
1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

A
Andre Guedes 已提交
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param =  (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

/* Synchronously program and enable an LE scan, then arm the delayed work
 * that will disable it after the requested timeout (in ms). Returns
 * -EINPROGRESS if a scan is already running. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

A
Andre Guedes 已提交
1692 1693 1694
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
1695
					    le_scan_disable.work);
A
Andre Guedes 已提交
1696 1697 1698 1699 1700 1701 1702 1703 1704
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

A
Andre Guedes 已提交
1705 1706 1707 1708 1709 1710 1711
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

1712 1713
	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
A
Andre Guedes 已提交
1714 1715 1716
}

/* Queue an asynchronous LE scan request; -EINPROGRESS when one is
 * already queued or running. The scan itself runs from le_scan_work. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

L
Linus Torvalds 已提交
1736 1737 1738 1739
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
1740
	int i, id, error;
L
Linus Torvalds 已提交
1741

1742
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
L
Linus Torvalds 已提交
1743

1744
	if (!hdev->open || !hdev->close)
L
Linus Torvalds 已提交
1745 1746
		return -EINVAL;

1747 1748 1749 1750 1751
	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

1752
	write_lock(&hci_dev_list_lock);
L
Linus Torvalds 已提交
1753 1754 1755 1756 1757 1758 1759

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}
1760

L
Linus Torvalds 已提交
1761 1762
	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
1763
	list_add_tail(&hdev->list, head);
L
Linus Torvalds 已提交
1764

1765
	mutex_init(&hdev->lock);
L
Linus Torvalds 已提交
1766 1767

	hdev->flags = 0;
1768
	hdev->dev_flags = 0;
L
Linus Torvalds 已提交
1769
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1770
	hdev->esco_type = (ESCO_HV1);
L
Linus Torvalds 已提交
1771
	hdev->link_mode = (HCI_LM_ACCEPT);
1772
	hdev->io_capability = 0x03; /* No Input No Output */
L
Linus Torvalds 已提交
1773

1774 1775 1776 1777
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

1778
	INIT_WORK(&hdev->rx_work, hci_rx_work);
1779
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1780
	INIT_WORK(&hdev->tx_work, hci_tx_work);
1781

L
Linus Torvalds 已提交
1782 1783 1784 1785 1786

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

1787 1788
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

1789
	for (i = 0; i < NUM_REASSEMBLY; i++)
1790 1791
		hdev->reassembly[i] = NULL;

L
Linus Torvalds 已提交
1792
	init_waitqueue_head(&hdev->req_wait_q);
1793
	mutex_init(&hdev->req_lock);
L
Linus Torvalds 已提交
1794

1795
	discovery_init(hdev);
L
Linus Torvalds 已提交
1796 1797 1798

	hci_conn_hash_init(hdev);

1799 1800
	INIT_LIST_HEAD(&hdev->mgmt_pending);

1801
	INIT_LIST_HEAD(&hdev->blacklist);
1802

1803 1804
	INIT_LIST_HEAD(&hdev->uuids);

1805
	INIT_LIST_HEAD(&hdev->link_keys);
1806
	INIT_LIST_HEAD(&hdev->long_term_keys);
1807

1808 1809
	INIT_LIST_HEAD(&hdev->remote_oob_data);

A
Andre Guedes 已提交
1810 1811
	INIT_LIST_HEAD(&hdev->adv_entries);

1812
	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1813
	INIT_WORK(&hdev->power_on, hci_power_on);
1814
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1815

1816 1817
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

L
Linus Torvalds 已提交
1818 1819 1820 1821
	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

A
Andre Guedes 已提交
1822 1823
	INIT_WORK(&hdev->le_scan, le_scan_work);

A
Andre Guedes 已提交
1824 1825
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

1826
	write_unlock(&hci_dev_list_lock);
L
Linus Torvalds 已提交
1827

1828 1829
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
1830 1831 1832 1833
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}
1834

1835 1836 1837
	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;
L
Linus Torvalds 已提交
1838

1839 1840 1841 1842 1843 1844 1845 1846 1847
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

1848 1849
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
1850
	schedule_work(&hdev->power_on);
1851

L
Linus Torvalds 已提交
1852
	hci_notify(hdev, HCI_DEV_REG);
1853
	hci_dev_hold(hdev);
L
Linus Torvalds 已提交
1854 1855

	return id;
1856

1857 1858 1859
err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
1860
	write_lock(&hci_dev_list_lock);
1861
	list_del(&hdev->list);
1862
	write_unlock(&hci_dev_list_lock);
1863

1864
	return error;
L
Linus Torvalds 已提交
1865 1866 1867 1868
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
1869
void hci_unregister_dev(struct hci_dev *hdev)
L
Linus Torvalds 已提交
1870
{
1871 1872
	int i;

1873
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
L
Linus Torvalds 已提交
1874

1875 1876
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

1877
	write_lock(&hci_dev_list_lock);
L
Linus Torvalds 已提交
1878
	list_del(&hdev->list);
1879
	write_unlock(&hci_dev_list_lock);
L
Linus Torvalds 已提交
1880 1881 1882

	hci_dev_do_close(hdev);

1883
	for (i = 0; i < NUM_REASSEMBLY; i++)
1884 1885
		kfree_skb(hdev->reassembly[i]);

1886
	if (!test_bit(HCI_INIT, &hdev->flags) &&
1887
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1888
		hci_dev_lock(hdev);
1889
		mgmt_index_removed(hdev);
1890
		hci_dev_unlock(hdev);
1891
	}
1892

1893 1894 1895 1896
	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

L
Linus Torvalds 已提交
1897 1898
	hci_notify(hdev, HCI_DEV_UNREG);

1899 1900 1901 1902 1903
	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

1904
	hci_del_sysfs(hdev);
1905

1906
	cancel_delayed_work_sync(&hdev->adv_work);
1907

1908 1909
	destroy_workqueue(hdev->workqueue);

1910
	hci_dev_lock(hdev);
1911
	hci_blacklist_clear(hdev);
1912
	hci_uuids_clear(hdev);
1913
	hci_link_keys_clear(hdev);
1914
	hci_smp_ltks_clear(hdev);
1915
	hci_remote_oob_data_clear(hdev);
A
Andre Guedes 已提交
1916
	hci_adv_entries_clear(hdev);
1917
	hci_dev_unlock(hdev);
1918

1919
	hci_dev_put(hdev);
L
Linus Torvalds 已提交
1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; no device state changes here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; no device state changes here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
1956
	queue_work(hdev->workqueue, &hdev->rx_work);
1957

1958 1959 1960 1961
	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

1962
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1963
						  int count, __u8 index)
1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

1993
		skb = bt_skb_alloc(len, GFP_ATOMIC);
1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
2007
		len = min_t(uint, scb->expect, count);
2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

2071 2072
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
2073 2074
	int rem = 0;

2075 2076 2077
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

2078
	while (count) {
2079
		rem = hci_reassembly(hdev, type, data, count, type - 1);
2080 2081
		if (rem < 0)
			return rem;
2082

2083 2084
		data += (count - rem);
		count = rem;
J
Joe Perches 已提交
2085
	}
2086

2087
	return rem;
2088 2089 2090
}
EXPORT_SYMBOL(hci_recv_fragment);

2091 2092 2093 2094 2095 2096 2097
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

2098
	while (count) {
2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

2113 2114
		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
2115 2116 2117 2118 2119
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
J
Joe Perches 已提交
2120
	}
2121 2122 2123 2124 2125

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

L
Linus Torvalds 已提交
2126 2127 2128 2129 2130 2131
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

2132
	write_lock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
2133
	list_add(&cb->list, &hci_cb_list);
2134
	write_unlock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
2135 2136 2137 2138 2139 2140 2141 2142 2143

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

2144
	write_lock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
2145
	list_del(&cb->list);
2146
	write_unlock(&hci_cb_list_lock);
L
Linus Torvalds 已提交
2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

2161
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
L
Linus Torvalds 已提交
2162

2163 2164
	/* Time stamp */
	__net_timestamp(skb);
L
Linus Torvalds 已提交
2165

2166 2167 2168 2169 2170
	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
2171
		hci_send_to_sock(hdev, skb);
L
Linus Torvalds 已提交
2172 2173 2174 2175 2176 2177 2178 2179 2180
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
2181
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
L
Linus Torvalds 已提交
2182 2183 2184 2185 2186
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

2187
	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
L
Linus Torvalds 已提交
2188 2189 2190

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
2191
		BT_ERR("%s no memory for command", hdev->name);
L
Linus Torvalds 已提交
2192 2193 2194 2195
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2196
	hdr->opcode = cpu_to_le16(opcode);
L
Linus Torvalds 已提交
2197 2198 2199 2200 2201 2202 2203
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

2204
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
L
Linus Torvalds 已提交
2205
	skb->dev = (void *) hdev;
2206

2207 2208 2209
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

L
Linus Torvalds 已提交
2210
	skb_queue_tail(&hdev->cmd_q, skb);
2211
	queue_work(hdev->workqueue, &hdev->cmd_work);
L
Linus Torvalds 已提交
2212 2213 2214 2215 2216

	return 0;
}

/* Get data from the previously sent command */
2217
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
L
Linus Torvalds 已提交
2218 2219 2220 2221 2222 2223 2224 2225
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

2226
	if (hdr->opcode != cpu_to_le16(opcode))
L
Linus Torvalds 已提交
2227 2228
		return NULL;

2229
	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
L
Linus Torvalds 已提交
2230 2231 2232 2233 2234 2235 2236 2237 2238 2239

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */

/* Prepend the ACL header (handle+flags, payload length) to skb */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

2247 2248
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
L
Linus Torvalds 已提交
2249 2250 2251 2252
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

A
Andrei Emeltchenko 已提交
2253 2254
	list = skb_shinfo(skb)->frag_list;
	if (!list) {
L
Linus Torvalds 已提交
2255 2256 2257
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

2258
		skb_queue_tail(queue, skb);
L
Linus Torvalds 已提交
2259 2260 2261 2262 2263 2264 2265
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
2266
		spin_lock(&queue->lock);
L
Linus Torvalds 已提交
2267

2268
		__skb_queue_tail(queue, skb);
2269 2270 2271

		flags &= ~ACL_START;
		flags |= ACL_CONT;
L
Linus Torvalds 已提交
2272 2273
		do {
			skb = list; list = list->next;
2274

L
Linus Torvalds 已提交
2275
			skb->dev = (void *) hdev;
2276
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2277
			hci_add_acl_hdr(skb, conn->handle, flags);
L
Linus Torvalds 已提交
2278 2279 2280

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

2281
			__skb_queue_tail(queue, skb);
L
Linus Torvalds 已提交
2282 2283
		} while (list);

2284
		spin_unlock(&queue->lock);
L
Linus Torvalds 已提交
2285
	}
2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299
}

/* Entry point for sending ACL data on a channel: stamp the skb with the
 * device and packet type, add the ACL header, queue it (handling
 * fragmentation) and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	/* Actual transmission happens asynchronously in hci_tx_work() */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */

/* Prepend the SCO header (handle + 8-bit length) to @skb, queue it on the
 * connection's data queue and schedule the TX worker.  Note the SCO dlen
 * field is a single byte, so no endian conversion is applied to it. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type with queued data that has the fewest
 * in-flight packets (simple fairness), and compute its send quota as an
 * even share of the controller's free buffer count for that link type.
 * Returns NULL with *quote = 0 when nothing is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only consider connections that can actually carry data */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type; LE falls back
		 * to the ACL pool when no dedicated LE buffers exist. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;	/* always allow at least one packet */
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

2390
/* TX timeout handler: the controller stopped returning buffer credits,
 * so disconnect every connection of @type that still has unacked packets.
 * Reason 0x13 = "Remote User Terminated Connection". */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}

2411 2412
/* Channel-aware scheduler: across all connections of @type, choose the
 * channel whose head skb has the highest priority; ties are broken in
 * favour of the connection with the fewest in-flight packets.  The quota
 * is the per-winner share of the free controller buffers. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * fairness accounting at this new level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Visited every connection of this type — stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer count for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;	/* always allow at least one packet */
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

2490 2491 2492 2493 2494 2495 2496 2497
/* Anti-starvation pass, run after a scheduling round actually sent data:
 * channels that sent nothing this round get the priority of their head
 * skb promoted to HCI_PRIO_MAX - 1 so they win the next round; channels
 * that did send have their per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to transmit: clear counter, no boost */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Visited every connection of this type — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

2540 2541 2542 2543 2544 2545
/* Number of controller data blocks consumed by one ACL packet.  The ACL
 * header is not charged against the block pool, and any remainder still
 * occupies a full block (round up). */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	int payload = skb->len - HCI_ACL_HDR_SIZE;

	return (payload + hdev->block_len - 1) / hdev->block_len;
}

2546
/* If the controller has no free ACL buffers (@cnt == 0) and nothing has
 * been acknowledged since the timeout window elapsed, assume the link is
 * stalled and tear down the offending connections.  Skipped for RAW
 * devices, where the stack does not manage flow control. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
L
Linus Torvalds 已提交
2556

2557 2558 2559 2560 2561 2562 2563 2564
/* Packet-based ACL scheduler: repeatedly pick the best channel and drain
 * up to its quota of packets, stopping early if a lower-priority skb
 * reaches the head of the queue.  One controller buffer credit is
 * consumed per packet sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* snapshot to detect progress */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Wake the ACL link out of sniff mode if required */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

2595 2596
/* Block-based ACL scheduler (AMP-style flow control): quotas and
 * controller credits are accounted in data blocks instead of packets. */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* snapshot to detect progress */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
						skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): packet is larger than the remaining
			 * block budget — the dequeued skb is neither sent,
			 * requeued nor freed here; confirm this is intended
			 * and not a leak. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

L
Linus Torvalds 已提交
2659 2660 2661 2662 2663 2664 2665 2666 2667
/* Schedule SCO */

/* Drain queued SCO data while the controller has free SCO buffers,
 * using the least-loaded-connection scheduler.  The per-connection sent
 * counter wraps back to 0 at ~0 to avoid overflow. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

2683 2684 2685 2686 2687 2688 2689 2690
/* Same as hci_sched_sco() but for eSCO links; eSCO shares the SCO
 * buffer pool (hdev->sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

2706 2707
/* LE link scheduler.  LE traffic uses the dedicated LE buffer pool when
 * the controller advertises one (le_pkts != 0), otherwise it borrows
 * from the ACL pool.  Channel selection and priority handling mirror
 * the ACL packet scheduler. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the buffer pool: dedicated LE credits, or shared ACL ones */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

2757
/* TX worker: runs every link-type scheduler in turn, then flushes any
 * raw (unknown-type) packets, which bypass flow control entirely. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

L
Lucas De Marchi 已提交
2780
/* ----- HCI RX task (incoming data processing) ----- */
L
Linus Torvalds 已提交
2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801

/* ACL data packet */

/* Parse an inbound ACL data packet: split the 16-bit handle field into
 * connection handle and PB/BC flags, look up the connection, and hand
 * the payload to L2CAP.  The skb is consumed (either by L2CAP or freed
 * here when the handle is unknown). */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Inbound data counts as activity: leave sniff mode if needed */
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */

/* Parse an inbound SCO data packet and deliver it to the SCO layer;
 * the skb is freed here if the connection handle is unknown. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

2848
/* RX worker: drain the receive queue.  Every packet is copied to the
 * monitor channel; promiscuous listeners get a socket copy too.  Data
 * packets are discarded for RAW devices and while the device is still
 * initializing; otherwise packets are dispatched by type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* RAW devices: user space owns the protocol, drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

2903
/* Command worker: transmit the next queued HCI command when a command
 * credit is available.  A clone of the outgoing command is kept in
 * hdev->sent_cmd so the completion handler can inspect it; the command
 * timer is armed to detect a controller that never responds. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's reference */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* HCI_Reset gets no response timeout; everything
			 * else must answer within HCI_CMD_TIMEOUT. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry from the worker */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
A
Andre Guedes 已提交
2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944

/* Kick off a general inquiry (device discovery) of the given @length
 * (in 1.28 s units).  Fails with -EINPROGRESS when an inquiry is
 * already running; flushes the inquiry cache so results start fresh. */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General Inquiry Access Code (GIAC) */
	static const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, giac, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
2953 2954 2955 2956 2957 2958 2959 2960 2961 2962

/* Abort a running inquiry.  Returns -EPERM when no inquiry is in
 * progress, otherwise issues HCI_Inquiry_Cancel to the controller. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}