Commit a1ae52c2 authored by John W. Linville


Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Parents: 0da4cc6e 3bd27240
......@@ -154,6 +154,7 @@ L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-bluetooth@vger.kernel.org
S: Maintained
F: net/6lowpan/
F: include/net/6lowpan.h
6PACK NETWORK DRIVER FOR AX.25
M: Andreas Koensgen <ajk@comnets.uni-bremen.de>
......
......@@ -1074,6 +1074,8 @@ struct hci_rp_read_data_block_size {
__le16 num_blocks;
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
......@@ -1170,6 +1172,8 @@ struct hci_rp_write_remote_amp_assoc {
__u8 phy_handle;
} __packed;
#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
#define HCI_OP_ENABLE_DUT_MODE 0x1803
#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
......
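For context on the two opcodes added above (HCI_OP_READ_LOCAL_CODECS 0x100b and HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c): an HCI opcode packs a 6-bit Opcode Group Field and a 10-bit Opcode Command Field. A minimal sketch of that encoding, assuming only the standard OGF/OCF split from the Bluetooth Core Specification; the HCI_OPCODE macro is illustrative and not part of this patch.

```c
#include <linux/types.h>

/* Illustrative only, not part of this patch: opcode = (OGF << 10) | OCF. */
#define HCI_OPCODE(ogf, ocf)	((__u16)(((ogf) << 10) | (ocf)))

/* 0x100b == OGF 0x04 (Informational), OCF 0x000b: Read Local Supported Codecs */
/* 0x140c == OGF 0x05 (Status), OCF 0x000c: Get MWS Transport Layer Config */
```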
......@@ -203,6 +203,8 @@ struct hci_dev {
__u16 page_scan_window;
__u8 page_scan_type;
__u8 le_adv_channel_map;
__u16 le_adv_min_interval;
__u16 le_adv_max_interval;
__u8 le_scan_type;
__u16 le_scan_interval;
__u16 le_scan_window;
......@@ -458,6 +460,7 @@ struct hci_conn_params {
enum {
HCI_AUTO_CONN_DISABLED,
HCI_AUTO_CONN_REPORT,
HCI_AUTO_CONN_DIRECT,
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
} auto_connect;
......
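The new HCI_AUTO_CONN_DIRECT value sits between report-only and always-connect behaviour; the add_device() hunk later in this diff maps the mgmt Add Device action parameter onto it. A rough sketch of that mapping under the action values used there (0x00, 0x01, 0x02); the helper name is hypothetical and does not exist in the tree.

```c
#include <net/bluetooth/hci_core.h>

/* Hypothetical helper mirroring the add_device() change further down:
 * action 0x02 = always connect, 0x01 = connect on ADV_DIRECT_IND only,
 * anything else = report only.
 */
static u8 action_to_auto_conn(u8 action)
{
	switch (action) {
	case 0x02:
		return HCI_AUTO_CONN_ALWAYS;
	case 0x01:
		return HCI_AUTO_CONN_DIRECT;
	default:
		return HCI_AUTO_CONN_REPORT;
	}
}
```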
......@@ -970,6 +970,62 @@ static int adv_channel_map_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
adv_channel_map_set, "%llu\n");
static int adv_min_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
return -EINVAL;
hci_dev_lock(hdev);
hdev->le_adv_min_interval = val;
hci_dev_unlock(hdev);
return 0;
}
static int adv_min_interval_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
hci_dev_lock(hdev);
*val = hdev->le_adv_min_interval;
hci_dev_unlock(hdev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
adv_min_interval_set, "%llu\n");
static int adv_max_interval_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
return -EINVAL;
hci_dev_lock(hdev);
hdev->le_adv_max_interval = val;
hci_dev_unlock(hdev);
return 0;
}
static int adv_max_interval_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
hci_dev_lock(hdev);
*val = hdev->le_adv_max_interval;
hci_dev_unlock(hdev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
adv_max_interval_set, "%llu\n");
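The 0x0020 to 0x4000 bounds enforced by the two setters above are the valid range for LE advertising intervals, which are expressed in units of 0.625 ms, so the 0x0800 default assigned in hci_alloc_dev() further down corresponds to 1.28 s. A minimal conversion sketch; the helper is illustrative and not part of this patch.

```c
#include <linux/types.h>

/* Illustrative only: LE advertising intervals count 0.625 ms slots. */
static inline unsigned int adv_interval_to_ms(u16 interval)
{
	return (interval * 625) / 1000;	/* 0x0800 -> 1280 ms */
}
```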
static int device_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
......@@ -1567,7 +1623,7 @@ static void hci_set_le_support(struct hci_request *req)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 0x01;
cp.simul = lmp_le_br_capable(hdev);
cp.simul = 0x00;
}
if (cp.le != lmp_host_le_capable(hdev))
......@@ -1686,6 +1742,14 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[22] & 0x04)
hci_set_event_mask_page_2(req);
/* Read local codec list if the HCI command is supported */
if (hdev->commands[29] & 0x20)
hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
/* Get MWS transport configuration if the HCI command is supported */
if (hdev->commands[30] & 0x08)
hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
/* Check for Synchronization Train support */
if (lmp_sync_train_capable(hdev))
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
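The hdev->commands[] tests above index the Supported Commands bitmap returned by HCI Read Local Supported Commands: octet 29 bit 5 (0x20) advertises Read Local Supported Codecs and octet 30 bit 3 (0x08) advertises Get MWS Transport Layer Configuration. A minimal sketch of that bit test, assuming the existing struct hci_dev; the helper name is hypothetical.

```c
#include <net/bluetooth/hci_core.h>

/* Hypothetical helper: test one bit of the Supported Commands bitmap. */
static inline bool hci_cmd_bit_set(struct hci_dev *hdev, int octet, u8 mask)
{
	return (hdev->commands[octet] & mask) != 0;
}

/* e.g. hci_cmd_bit_set(hdev, 29, 0x20) mirrors "hdev->commands[29] & 0x20" */
```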
......@@ -1825,6 +1889,10 @@ static int __hci_init(struct hci_dev *hdev)
hdev, &supervision_timeout_fops);
debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
hdev, &adv_channel_map_fops);
debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
hdev, &adv_min_interval_fops);
debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
hdev, &adv_max_interval_fops);
debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
&device_list_fops);
debugfs_create_u16("discov_interleaved_timeout", 0644,
......@@ -3639,6 +3707,7 @@ int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
list_add(&params->action, &hdev->pend_le_reports);
hci_update_background_scan(hdev);
break;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
if (!is_connected(hdev, addr, addr_type)) {
list_add(&params->action, &hdev->pend_le_conns);
......@@ -3914,6 +3983,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->sniff_min_interval = 80;
hdev->le_adv_channel_map = 0x07;
hdev->le_adv_min_interval = 0x0800;
hdev->le_adv_max_interval = 0x0800;
hdev->le_scan_interval = 0x0060;
hdev->le_scan_window = 0x0030;
hdev->le_conn_min_interval = 0x0028;
......@@ -5397,12 +5468,113 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static void add_to_white_list(struct hci_request *req,
struct hci_conn_params *params)
{
struct hci_cp_le_add_to_white_list cp;
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}
static u8 update_white_list(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
struct bdaddr_list *b;
uint8_t white_list_entries = 0;
/* Go through the current white list programmed into the
* controller one by one and check if that address is still
* in the list of pending connections or list of devices to
* report. If not present in either list, then queue the
* command to remove it from the controller.
*/
list_for_each_entry(b, &hdev->le_white_list, list) {
struct hci_cp_le_del_from_white_list cp;
if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
&b->bdaddr, b->bdaddr_type) ||
hci_pend_le_action_lookup(&hdev->pend_le_reports,
&b->bdaddr, b->bdaddr_type)) {
white_list_entries++;
continue;
}
cp.bdaddr_type = b->bdaddr_type;
bacpy(&cp.bdaddr, &b->bdaddr);
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
sizeof(cp), &cp);
}
/* Since all no longer valid white list entries have been
* removed, walk through the list of pending connections
* and ensure that any new device gets programmed into
* the controller.
*
* If the list of devices is larger than the number of
* available white list entries in the controller, then
* just abort and return a filter policy value that does
* not use the white list.
*/
list_for_each_entry(params, &hdev->pend_le_conns, action) {
if (hci_bdaddr_list_lookup(&hdev->le_white_list,
&params->addr, params->addr_type))
continue;
if (white_list_entries >= hdev->le_white_list_size) {
/* Select filter policy to accept all advertising */
return 0x00;
}
if (hci_find_irk_by_addr(hdev, &params->addr,
params->addr_type)) {
/* White list can not be used with RPAs */
return 0x00;
}
white_list_entries++;
add_to_white_list(req, params);
}
/* After adding all new pending connections, walk through
* the list of pending reports and also add these to the
* white list if there is still space.
*/
list_for_each_entry(params, &hdev->pend_le_reports, action) {
if (hci_bdaddr_list_lookup(&hdev->le_white_list,
&params->addr, params->addr_type))
continue;
if (white_list_entries >= hdev->le_white_list_size) {
/* Select filter policy to accept all advertising */
return 0x00;
}
if (hci_find_irk_by_addr(hdev, &params->addr,
params->addr_type)) {
/* White list can not be used with RPAs */
return 0x00;
}
white_list_entries++;
add_to_white_list(req, params);
}
/* Select filter policy to use white list */
return 0x01;
}
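The value returned by update_white_list() is used directly as the Scanning_Filter_Policy field of LE Set Scan Parameters: 0x00 accepts all advertising packets, 0x01 accepts only packets from devices on the controller white list. The enumerators below are illustrative names for those two spec-defined values, not kernel identifiers.

```c
/* Illustrative names for the filter policy values returned above. */
enum {
	LE_SCAN_FILTER_ACCEPT_ALL = 0x00,	/* white list not used */
	LE_SCAN_FILTER_WHITELIST  = 0x01,	/* white list entries only */
};
```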
void hci_req_add_le_passive_scan(struct hci_request *req)
{
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are sent
* during passive scanning. Not using an unresolvable address
......@@ -5413,11 +5585,18 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hci_update_random_address(req, false, &own_addr_type))
return;
/* Adding or removing entries from the white list must
* happen before enabling scanning. The controller does
* not allow white list modification while scanning.
*/
filter_policy = update_white_list(req);
memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = LE_SCAN_PASSIVE;
param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
param_cp.window = cpu_to_le16(hdev->le_scan_window);
param_cp.own_address_type = own_addr_type;
param_cp.filter_policy = filter_policy;
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);
......
......@@ -317,7 +317,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (param & SCAN_PAGE)
set_bit(HCI_PSCAN, &hdev->flags);
else
clear_bit(HCI_ISCAN, &hdev->flags);
clear_bit(HCI_PSCAN, &hdev->flags);
done:
hci_dev_unlock(hdev);
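The corrected branch above clears HCI_PSCAN when SCAN_PAGE is not set in the Write Scan Enable parameter; the replaced line cleared HCI_ISCAN instead. For reference, the parameter is a bitmask with these values, shown here only as a reminder of the existing hci.h defines:

```c
/* Reminder of the Write Scan Enable bitmask (existing hci.h values). */
#define SCAN_DISABLED	0x00
#define SCAN_INQUIRY	0x01	/* inquiry scan -> HCI_ISCAN flag */
#define SCAN_PAGE	0x02	/* page scan    -> HCI_PSCAN flag */
```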
......@@ -2259,6 +2259,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
/* Fall through */
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns);
......@@ -4251,6 +4252,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
u8 addr_type, u8 adv_type)
{
struct hci_conn *conn;
struct hci_conn_params *params;
/* If the event is not connectable don't proceed further */
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
......@@ -4266,18 +4268,35 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
if (hdev->conn_hash.le_num_slave > 0)
return;
/* If we're connectable, always connect any ADV_DIRECT_IND event */
if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
adv_type == LE_ADV_DIRECT_IND)
goto connect;
/* If we're not connectable only connect devices that we have in
* our pend_le_conns list.
*/
if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
addr, addr_type);
if (!params)
return;
switch (params->auto_connect) {
case HCI_AUTO_CONN_DIRECT:
/* Only devices advertising with ADV_DIRECT_IND
* trigger a connection attempt. This allows
* incoming connections from slave devices.
*/
if (adv_type != LE_ADV_DIRECT_IND)
return;
break;
case HCI_AUTO_CONN_ALWAYS:
/* Devices advertising with ADV_IND or ADV_DIRECT_IND
* trigger a connection attempt. This means that
* incoming connections from slave devices are
* accepted and outgoing connections to slave
* devices are established when found.
*/
break;
default:
return;
}
connect:
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
if (!IS_ERR(conn))
......
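The switch in check_pending_le_conn() above distinguishes the two connectable advertising PDU types. For reference, the LE advertising report event types involved, shown here only as a reminder of the existing hci.h defines:

```c
/* Reminder of the advertising report event types (existing hci.h values). */
#define LE_ADV_IND		0x00	/* connectable undirected */
#define LE_ADV_DIRECT_IND	0x01	/* connectable directed */
#define LE_ADV_SCAN_IND		0x02	/* scannable undirected */
#define LE_ADV_NONCONN_IND	0x03	/* non-connectable undirected */
```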
......@@ -1086,8 +1086,8 @@ static void enable_advertising(struct hci_request *req)
return;
memset(&cp, 0, sizeof(cp));
cp.min_interval = cpu_to_le16(0x0800);
cp.max_interval = cpu_to_le16(0x0800);
cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
cp.own_address_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
......@@ -1881,7 +1881,18 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->val) {
scan = SCAN_PAGE;
} else {
scan = 0;
/* If we don't have any whitelist entries, just
* disable all scanning. If there are entries
* and we had both page and inquiry scanning
* enabled, then fall back to only page scanning.
* Otherwise no changes are needed.
*/
if (list_empty(&hdev->whitelist))
scan = SCAN_DISABLED;
else if (test_bit(HCI_ISCAN, &hdev->flags))
scan = SCAN_PAGE;
else
goto no_scan_update;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
hdev->discov_timeout > 0)
......@@ -1891,6 +1902,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
no_scan_update:
/* If we're going from non-connectable to connectable or
* vice-versa when fast connectable is enabled ensure that fast
* connectable gets disabled. write_fast_connectable won't do
......@@ -2264,7 +2276,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
if (val) {
hci_cp.le = val;
hci_cp.simul = lmp_le_br_capable(hdev);
hci_cp.simul = 0x00;
} else {
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
disable_advertising(&req);
......@@ -5271,7 +5283,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
if (cp->action != 0x00 && cp->action != 0x01)
if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
......@@ -5281,7 +5293,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (cp->addr.type == BDADDR_BREDR) {
bool update_scan;
/* Only "connect" action supported for now */
/* Only incoming connections action is supported for now */
if (cp->action != 0x01) {
err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
......@@ -5307,8 +5319,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
else
addr_type = ADDR_LE_DEV_RANDOM;
if (cp->action)
if (cp->action == 0x02)
auto_conn = HCI_AUTO_CONN_ALWAYS;
else if (cp->action == 0x01)
auto_conn = HCI_AUTO_CONN_DIRECT;
else
auto_conn = HCI_AUTO_CONN_REPORT;
......@@ -5870,6 +5884,7 @@ static void restart_le_actions(struct hci_dev *hdev)
list_del_init(&p->action);
switch (p->auto_connect) {
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_add(&p->action, &hdev->pend_le_conns);
break;
......@@ -5922,8 +5937,8 @@ static int powered_update_hci(struct hci_dev *hdev)
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
cp.le = 1;
cp.simul = lmp_le_br_capable(hdev);
cp.le = 0x01;
cp.simul = 0x00;
/* Check first if we already have the right
* host state (host features set)
......
......@@ -1910,10 +1910,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
if (!skb_linearize(skb)) {
s = rfcomm_recv_frame(s, skb);
else
if (!s)
break;
} else {
kfree_skb(skb);
}
}
if (s && (sk->sk_state == BT_CLOSED))
......
......@@ -1291,6 +1291,22 @@ static void smp_notify_keys(struct l2cap_conn *conn)
bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
hcon->dst_type = smp->remote_irk->addr_type;
l2cap_conn_update_id_addr(hcon);
/* When receiving an identity resolving key for
* a remote device that does not use a resolvable
* private address, just remove the key so that
* it is possible to use the controller white
* list for scanning.
*
* Userspace will have been told to not store
* this key at this point. So it is safe to
* just remove it.
*/
if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
list_del(&smp->remote_irk->list);
kfree(smp->remote_irk);
smp->remote_irk = NULL;
}
}
/* The LTKs and CSRKs should be persistent only if both sides
......
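In the smp_notify_keys() change above, !bacmp(&smp->remote_irk->rpa, BDADDR_ANY) tests whether the IRK has no resolvable private address on record: bacmp() compares two bdaddr_t values like memcmp(), and BDADDR_ANY is the all-zero address. A minimal sketch of that test against the existing struct smp_irk; the helper name is hypothetical.

```c
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical helper expressing the test used in smp_notify_keys(). */
static inline bool irk_uses_rpa(struct smp_irk *irk)
{
	return bacmp(&irk->rpa, BDADDR_ANY) != 0;
}
```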