提交 e2dfb94f 编写于 作者: Jakub Kicinski

Merge tag 'for-net-next-2021-12-29' of...

Merge tag 'for-net-next-2021-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

 - Add support for Foxconn MT7922A
 - Add support for Realtek RTL8852AE
 - Rework HCI event handling to use skb_pull_data

* tag 'for-net-next-2021-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next: (62 commits)
  Bluetooth: MGMT: Fix spelling mistake "simultanous" -> "simultaneous"
  Bluetooth: vhci: Set HCI_QUIRK_VALID_LE_STATES
  Bluetooth: MGMT: Fix LE simultaneous roles UUID if not supported
  Bluetooth: hci_sync: Add check simultaneous roles support
  Bluetooth: hci_sync: Wait for proper events when connecting LE
  Bluetooth: hci_sync: Add support for waiting specific LE subevents
  Bluetooth: hci_sync: Add hci_le_create_conn_sync
  Bluetooth: hci_event: Use skb_pull_data when processing inquiry results
  Bluetooth: hci_sync: Push sync command cancellation to workqueue
  Bluetooth: hci_qca: Stop IBS timer during BT OFF
  Bluetooth: btusb: Add support for Foxconn MT7922A
  Bluetooth: btintel: Add missing quirks and msft ext for legacy bootloader
  Bluetooth: btusb: Add two more Bluetooth parts for WCN6855
  Bluetooth: L2CAP: Fix using wrong mode
  Bluetooth: hci_sync: Fix not always pausing advertising when necessary
  Bluetooth: mgmt: Make use of mgmt_send_event_skb in MGMT_EV_DEVICE_CONNECTED
  Bluetooth: mgmt: Make use of mgmt_send_event_skb in MGMT_EV_DEVICE_FOUND
  Bluetooth: mgmt: Introduce mgmt_alloc_skb and mgmt_send_event_skb
  Bluetooth: btusb: Return error code when getting patch status failed
  Bluetooth: btusb: Handle download_firmware failure cases
  ...
====================

Link: https://lore.kernel.org/r/20211229211258.2290966-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/dmi.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/bluetooth.h>
...@@ -343,6 +344,40 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) ...@@ -343,6 +344,40 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
return skb; return skb;
} }
/* DMI match table of Apple machines whose Broadcom controller has a broken
 * LE Read Transmit Power command; a match causes
 * HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER to be set (see btbcm_read_info()).
 */
static const struct dmi_system_id disable_broken_read_transmit_power[] = {
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"),
		},
	},
	{ }	/* terminating entry required by dmi_first_match() */
};
static int btbcm_read_info(struct hci_dev *hdev) static int btbcm_read_info(struct hci_dev *hdev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -363,6 +398,10 @@ static int btbcm_read_info(struct hci_dev *hdev) ...@@ -363,6 +398,10 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb); kfree_skb(skb);
/* Read DMI and disable broken Read LE Min/Max Tx Power */
if (dmi_first_match(disable_broken_read_transmit_power))
set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
return 0; return 0;
} }
......
...@@ -2498,10 +2498,14 @@ static int btintel_setup_combined(struct hci_dev *hdev) ...@@ -2498,10 +2498,14 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x12: /* ThP */ case 0x12: /* ThP */
case 0x13: /* HrP */ case 0x13: /* HrP */
case 0x14: /* CcP */ case 0x14: /* CcP */
/* Some legacy bootloader devices from JfP supports both old /* Some legacy bootloader devices starting from JfP,
* and TLV based HCI_Intel_Read_Version command. But we don't * the operational firmware supports both old and TLV based
* want to use the TLV based setup routines for those legacy * HCI_Intel_Read_Version command based on the command
* bootloader device. * parameter.
*
* For upgrading firmware case, the TLV based version cannot
* be used because the firmware filename for legacy bootloader
* is based on the old format.
* *
* Also, it is not easy to convert TLV based version from the * Also, it is not easy to convert TLV based version from the
* legacy version format. * legacy version format.
...@@ -2513,6 +2517,20 @@ static int btintel_setup_combined(struct hci_dev *hdev) ...@@ -2513,6 +2517,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_read_version(hdev, &ver); err = btintel_read_version(hdev, &ver);
if (err) if (err)
return err; return err;
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
/* Valid LE States quirk for JfP/ThP familiy */
if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
err = btintel_bootloader_setup(hdev, &ver); err = btintel_bootloader_setup(hdev, &ver);
break; break;
case 0x17: case 0x17:
......
...@@ -121,6 +121,7 @@ int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, ...@@ -121,6 +121,7 @@ int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
} else { } else {
bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)", bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
status); status);
err = -EIO;
goto err_release_fw; goto err_release_fw;
} }
} }
......
...@@ -98,6 +98,8 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table); ...@@ -98,6 +98,8 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_SDIO_BLOCK_SIZE 256 #define MTK_SDIO_BLOCK_SIZE 256
#define BTMTKSDIO_TX_WAIT_VND_EVT 1 #define BTMTKSDIO_TX_WAIT_VND_EVT 1
#define BTMTKSDIO_HW_TX_READY 2
#define BTMTKSDIO_FUNC_ENABLED 3
struct mtkbtsdio_hdr { struct mtkbtsdio_hdr {
__le16 len; __le16 len;
...@@ -113,7 +115,6 @@ struct btmtksdio_dev { ...@@ -113,7 +115,6 @@ struct btmtksdio_dev {
struct work_struct txrx_work; struct work_struct txrx_work;
unsigned long tx_state; unsigned long tx_state;
struct sk_buff_head txq; struct sk_buff_head txq;
bool hw_tx_ready;
struct sk_buff *evt_skb; struct sk_buff *evt_skb;
...@@ -254,7 +255,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev, ...@@ -254,7 +255,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
sdio_hdr->reserved = cpu_to_le16(0); sdio_hdr->reserved = cpu_to_le16(0);
sdio_hdr->bt_type = hci_skb_pkt_type(skb); sdio_hdr->bt_type = hci_skb_pkt_type(skb);
bdev->hw_tx_ready = false; clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data, err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
round_up(skb->len, MTK_SDIO_BLOCK_SIZE)); round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
if (err < 0) if (err < 0)
...@@ -324,8 +325,29 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) ...@@ -324,8 +325,29 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
return err; return err;
} }
/* ACL receive hook: inspects the connection handle and diverts vendor
 * pseudo-handles (firmware dump / debug logging) to the HCI diag channel;
 * everything else goes through the normal ACL receive path.
 */
static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);

	switch (handle) {
	case 0xfc6f:
		/* Firmware dump from device: when the firmware hangs, the
		 * device can no longer suspend and thus disable auto-suspend.
		 */
		pm_runtime_forbid(bdev->dev);
		fallthrough;
	case 0x05ff:
	case 0x05fe:
		/* Firmware debug logging */
		return hci_recv_diag(hdev, skb);
	}

	/* Regular ACL data */
	return hci_recv_frame(hdev, skb);
}
static const struct h4_recv_pkt mtk_recv_pkts[] = { static const struct h4_recv_pkt mtk_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_ACL, .recv = btmtksdio_recv_acl },
{ H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = btmtksdio_recv_event }, { H4_RECV_EVENT, .recv = btmtksdio_recv_event },
}; };
...@@ -463,11 +485,12 @@ static void btmtksdio_txrx_work(struct work_struct *work) ...@@ -463,11 +485,12 @@ static void btmtksdio_txrx_work(struct work_struct *work)
bt_dev_dbg(bdev->hdev, "Get fw own back"); bt_dev_dbg(bdev->hdev, "Get fw own back");
if (int_status & TX_EMPTY) if (int_status & TX_EMPTY)
bdev->hw_tx_ready = true; set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
else if (unlikely(int_status & TX_FIFO_OVERFLOW)) else if (unlikely(int_status & TX_FIFO_OVERFLOW))
bt_dev_warn(bdev->hdev, "Tx fifo overflow"); bt_dev_warn(bdev->hdev, "Tx fifo overflow");
if (bdev->hw_tx_ready) { if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) {
skb = skb_dequeue(&bdev->txq); skb = skb_dequeue(&bdev->txq);
if (skb) { if (skb) {
err = btmtksdio_tx_packet(bdev, skb); err = btmtksdio_tx_packet(bdev, skb);
...@@ -517,6 +540,8 @@ static int btmtksdio_open(struct hci_dev *hdev) ...@@ -517,6 +540,8 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0) if (err < 0)
goto err_release_host; goto err_release_host;
set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
/* Get ownership from the device */ /* Get ownership from the device */
sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
if (err < 0) if (err < 0)
...@@ -618,6 +643,7 @@ static int btmtksdio_close(struct hci_dev *hdev) ...@@ -618,6 +643,7 @@ static int btmtksdio_close(struct hci_dev *hdev)
if (err < 0) if (err < 0)
bt_dev_err(bdev->hdev, "Cannot return ownership to device"); bt_dev_err(bdev->hdev, "Cannot return ownership to device");
clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
sdio_disable_func(bdev->func); sdio_disable_func(bdev->func);
sdio_release_host(bdev->func); sdio_release_host(bdev->func);
...@@ -765,6 +791,9 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname) ...@@ -765,6 +791,9 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
return err; return err;
} }
hci_set_msft_opcode(hdev, 0xFD30);
hci_set_aosp_capable(hdev);
return err; return err;
} }
...@@ -811,7 +840,7 @@ static int btmtksdio_setup(struct hci_dev *hdev) ...@@ -811,7 +840,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
u32 fw_version = 0; u32 fw_version = 0;
calltime = ktime_get(); calltime = ktime_get();
bdev->hw_tx_ready = true; set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
switch (bdev->data->chipid) { switch (bdev->data->chipid) {
case 0x7921: case 0x7921:
...@@ -1036,6 +1065,11 @@ static int btmtksdio_runtime_suspend(struct device *dev) ...@@ -1036,6 +1065,11 @@ static int btmtksdio_runtime_suspend(struct device *dev)
if (!bdev) if (!bdev)
return 0; return 0;
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
return 0;
sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
sdio_claim_host(bdev->func); sdio_claim_host(bdev->func);
sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
...@@ -1063,6 +1097,9 @@ static int btmtksdio_runtime_resume(struct device *dev) ...@@ -1063,6 +1097,9 @@ static int btmtksdio_runtime_resume(struct device *dev)
if (!bdev) if (!bdev)
return 0; return 0;
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
return 0;
sdio_claim_host(bdev->func); sdio_claim_host(bdev->func);
sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/gpio/consumer.h> #include <linux/gpio/consumer.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/bluetooth.h>
...@@ -31,7 +32,6 @@ ...@@ -31,7 +32,6 @@
static bool disable_scofix; static bool disable_scofix;
static bool force_scofix; static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND); static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool reset = true; static bool reset = true;
static struct usb_driver btusb_driver; static struct usb_driver btusb_driver;
...@@ -296,6 +296,21 @@ static const struct usb_device_id blacklist_table[] = { ...@@ -296,6 +296,21 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 | { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH | BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES }, BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Broadcom BCM2035 */ /* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
...@@ -385,6 +400,8 @@ static const struct usb_device_id blacklist_table[] = { ...@@ -385,6 +400,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */ /* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH }, BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH }, BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
...@@ -424,6 +441,14 @@ static const struct usb_device_id blacklist_table[] = { ...@@ -424,6 +441,14 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH | BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES }, BTUSB_VALID_LE_STATES },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */ /* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
...@@ -550,8 +575,13 @@ struct btusb_data { ...@@ -550,8 +575,13 @@ struct btusb_data {
unsigned long flags; unsigned long flags;
struct work_struct work; bool poll_sync;
struct work_struct waker; int intr_interval;
struct work_struct work;
struct work_struct waker;
struct delayed_work rx_work;
struct sk_buff_head acl_q;
struct usb_anchor deferred; struct usb_anchor deferred;
struct usb_anchor tx_anchor; struct usb_anchor tx_anchor;
...@@ -716,6 +746,16 @@ static inline void btusb_free_frags(struct btusb_data *data) ...@@ -716,6 +746,16 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags); spin_unlock_irqrestore(&data->rxlock, flags);
} }
/* Event receive hook: when URB poll sync is active (intr_interval != 0),
 * immediately kick the deferred rx worker so ACL data queued in acl_q is
 * drained without waiting for the full polling interval, then deliver the
 * event through the registered recv_event callback.
 */
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
	if (data->intr_interval) {
		/* Trigger dequeue immediately if an event is received */
		schedule_delayed_work(&data->rx_work, 0);
	}

	return data->recv_event(data->hdev, skb);
}
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -761,7 +801,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) ...@@ -761,7 +801,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
if (!hci_skb_expect(skb)) { if (!hci_skb_expect(skb)) {
/* Complete frame */ /* Complete frame */
data->recv_event(data->hdev, skb); btusb_recv_event(data, skb);
skb = NULL; skb = NULL;
} }
} }
...@@ -772,6 +812,20 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) ...@@ -772,6 +812,20 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
return err; return err;
} }
/* ACL receive hook: with force_poll_sync disabled, deliver directly;
 * otherwise defer the packet into acl_q and let btusb_rx_work() drain it
 * after one interrupt polling interval.
 */
static int btusb_recv_acl(struct btusb_data *data, struct sk_buff *skb)
{
	/* Only queue ACL packet if intr_interval is set as it means
	 * force_poll_sync has been enabled.
	 */
	if (!data->intr_interval)
		return data->recv_acl(data->hdev, skb);

	skb_queue_tail(&data->acl_q, skb);
	schedule_delayed_work(&data->rx_work, data->intr_interval);

	return 0;
}
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -819,7 +873,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) ...@@ -819,7 +873,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
if (!hci_skb_expect(skb)) { if (!hci_skb_expect(skb)) {
/* Complete frame */ /* Complete frame */
data->recv_acl(data->hdev, skb); btusb_recv_acl(data, skb);
skb = NULL; skb = NULL;
} }
} }
...@@ -925,6 +979,8 @@ static void btusb_intr_complete(struct urb *urb) ...@@ -925,6 +979,8 @@ static void btusb_intr_complete(struct urb *urb)
if (err != -EPERM && err != -ENODEV) if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p failed to resubmit (%d)", bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
urb, -err); urb, -err);
if (err != -EPERM)
hci_cmd_sync_cancel(hdev, -err);
usb_unanchor_urb(urb); usb_unanchor_urb(urb);
} }
} }
...@@ -968,9 +1024,33 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) ...@@ -968,9 +1024,33 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (err != -EPERM && err != -ENODEV) if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p submission failed (%d)", bt_dev_err(hdev, "urb %p submission failed (%d)",
urb, -err); urb, -err);
if (err != -EPERM)
hci_cmd_sync_cancel(hdev, -err);
usb_unanchor_urb(urb); usb_unanchor_urb(urb);
} }
/* Only initialize intr_interval if URB poll sync is enabled */
if (!data->poll_sync)
goto done;
/* The units are frames (milliseconds) for full and low speed devices,
* and microframes (1/8 millisecond) for highspeed and SuperSpeed
* devices.
*
* This is done once on open/resume so it shouldn't change even if
* force_poll_sync changes.
*/
switch (urb->dev->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER: /* units are 125us */
data->intr_interval = usecs_to_jiffies(urb->interval * 125);
break;
default:
data->intr_interval = msecs_to_jiffies(urb->interval);
break;
}
done:
usb_free_urb(urb); usb_free_urb(urb);
return err; return err;
...@@ -1323,10 +1403,13 @@ static void btusb_tx_complete(struct urb *urb) ...@@ -1323,10 +1403,13 @@ static void btusb_tx_complete(struct urb *urb)
if (!test_bit(HCI_RUNNING, &hdev->flags)) if (!test_bit(HCI_RUNNING, &hdev->flags))
goto done; goto done;
if (!urb->status) if (!urb->status) {
hdev->stat.byte_tx += urb->transfer_buffer_length; hdev->stat.byte_tx += urb->transfer_buffer_length;
else } else {
if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)
hci_cmd_sync_cancel(hdev, -urb->status);
hdev->stat.err_tx++; hdev->stat.err_tx++;
}
done: done:
spin_lock_irqsave(&data->txlock, flags); spin_lock_irqsave(&data->txlock, flags);
...@@ -1430,9 +1513,12 @@ static int btusb_close(struct hci_dev *hdev) ...@@ -1430,9 +1513,12 @@ static int btusb_close(struct hci_dev *hdev)
BT_DBG("%s", hdev->name); BT_DBG("%s", hdev->name);
cancel_delayed_work(&data->rx_work);
cancel_work_sync(&data->work); cancel_work_sync(&data->work);
cancel_work_sync(&data->waker); cancel_work_sync(&data->waker);
skb_queue_purge(&data->acl_q);
clear_bit(BTUSB_ISOC_RUNNING, &data->flags); clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
clear_bit(BTUSB_BULK_RUNNING, &data->flags); clear_bit(BTUSB_BULK_RUNNING, &data->flags);
clear_bit(BTUSB_INTR_RUNNING, &data->flags); clear_bit(BTUSB_INTR_RUNNING, &data->flags);
...@@ -1464,6 +1550,10 @@ static int btusb_flush(struct hci_dev *hdev) ...@@ -1464,6 +1550,10 @@ static int btusb_flush(struct hci_dev *hdev)
BT_DBG("%s", hdev->name); BT_DBG("%s", hdev->name);
cancel_delayed_work(&data->rx_work);
skb_queue_purge(&data->acl_q);
usb_kill_anchored_urbs(&data->tx_anchor); usb_kill_anchored_urbs(&data->tx_anchor);
btusb_free_frags(data); btusb_free_frags(data);
...@@ -1827,6 +1917,17 @@ static void btusb_waker(struct work_struct *work) ...@@ -1827,6 +1917,17 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf); usb_autopm_put_interface(data->intf);
} }
/* Delayed-work handler: drains the ACL packets deferred by btusb_recv_acl()
 * and hands each one to the registered recv_acl callback.
 */
static void btusb_rx_work(struct work_struct *work)
{
	struct btusb_data *data = container_of(work, struct btusb_data,
					       rx_work.work);
	struct sk_buff *skb;

	/* Dequeue ACL data received during the interval */
	while ((skb = skb_dequeue(&data->acl_q)))
		data->recv_acl(data->hdev, skb);
}
static int btusb_setup_bcm92035(struct hci_dev *hdev) static int btusb_setup_bcm92035(struct hci_dev *hdev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -2546,6 +2647,10 @@ static int btusb_mtk_setup(struct hci_dev *hdev) ...@@ -2546,6 +2647,10 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
dev_id & 0xffff, (fw_version & 0xff) + 1); dev_id & 0xffff, (fw_version & 0xff) + 1);
err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
btusb_mtk_hci_wmt_sync); btusb_mtk_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
return err;
}
/* It's Device EndPoint Reset Option Register */ /* It's Device EndPoint Reset Option Register */
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
...@@ -2884,7 +2989,8 @@ struct qca_version { ...@@ -2884,7 +2989,8 @@ struct qca_version {
__le32 rom_version; __le32 rom_version;
__le32 patch_version; __le32 patch_version;
__le32 ram_version; __le32 ram_version;
__le16 board_id; __u8 chip_id;
__u8 platform_id;
__le16 flag; __le16 flag;
__u8 reserved[4]; __u8 reserved[4];
} __packed; } __packed;
...@@ -3073,7 +3179,17 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size, ...@@ -3073,7 +3179,17 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
u16 flag = le16_to_cpu(ver->flag); u16 flag = le16_to_cpu(ver->flag);
if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
u16 board_id = le16_to_cpu(ver->board_id); /* The board_id should be split into two bytes
* The 1st byte is chip ID, and the 2nd byte is platform ID
* For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
* we have several platforms, and platform IDs are continuously added
* Platform ID:
* 0x00 is for Mobile
* 0x01 is for X86
* 0x02 is for Automotive
* 0x03 is for Consumer electronic
*/
u16 board_id = (ver->chip_id << 8) + ver->platform_id;
const char *variant; const char *variant;
switch (le32_to_cpu(ver->ram_version)) { switch (le32_to_cpu(ver->ram_version)) {
...@@ -3373,6 +3489,49 @@ static int btusb_shutdown_qca(struct hci_dev *hdev) ...@@ -3373,6 +3489,49 @@ static int btusb_shutdown_qca(struct hci_dev *hdev)
return 0; return 0;
} }
/* debugfs read handler for "force_poll_sync": reports the current
 * poll_sync setting as "Y\n" or "N\n" (two bytes copied to userspace).
 */
static ssize_t force_poll_sync_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct btusb_data *data = file->private_data;
	char buf[3];

	buf[0] = data->poll_sync ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
/* debugfs write handler for "force_poll_sync": parses a boolean from
 * userspace and updates data->poll_sync. Returns -EPERM while the adapter
 * is up and -EALREADY when the value would not change.
 */
static ssize_t force_poll_sync_write(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct btusb_data *data = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* Only allow changes while the adapter is down */
	if (test_bit(HCI_UP, &data->hdev->flags))
		return -EPERM;

	/* Reject no-op writes so the caller can tell nothing changed */
	if (data->poll_sync == enable)
		return -EALREADY;

	data->poll_sync = enable;

	return count;
}
/* File operations backing the "force_poll_sync" debugfs entry */
static const struct file_operations force_poll_sync_fops = {
	.open		= simple_open,
	.read		= force_poll_sync_read,
	.write		= force_poll_sync_write,
	.llseek		= default_llseek,
};
static int btusb_probe(struct usb_interface *intf, static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id) const struct usb_device_id *id)
{ {
...@@ -3456,6 +3615,10 @@ static int btusb_probe(struct usb_interface *intf, ...@@ -3456,6 +3615,10 @@ static int btusb_probe(struct usb_interface *intf,
INIT_WORK(&data->work, btusb_work); INIT_WORK(&data->work, btusb_work);
INIT_WORK(&data->waker, btusb_waker); INIT_WORK(&data->waker, btusb_waker);
INIT_DELAYED_WORK(&data->rx_work, btusb_rx_work);
skb_queue_head_init(&data->acl_q);
init_usb_anchor(&data->deferred); init_usb_anchor(&data->deferred);
init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->tx_anchor);
spin_lock_init(&data->txlock); spin_lock_init(&data->txlock);
...@@ -3721,6 +3884,9 @@ static int btusb_probe(struct usb_interface *intf, ...@@ -3721,6 +3884,9 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data); usb_set_intfdata(intf, data);
debugfs_create_file("force_poll_sync", 0644, hdev->debugfs, data,
&force_poll_sync_fops);
return 0; return 0;
out_free_dev: out_free_dev:
......
...@@ -1928,6 +1928,9 @@ static int qca_power_off(struct hci_dev *hdev) ...@@ -1928,6 +1928,9 @@ static int qca_power_off(struct hci_dev *hdev)
hu->hdev->hw_error = NULL; hu->hdev->hw_error = NULL;
hu->hdev->cmd_timeout = NULL; hu->hdev->cmd_timeout = NULL;
del_timer_sync(&qca->wake_retrans_timer);
del_timer_sync(&qca->tx_idle_timer);
/* Stop sending shutdown command if soc crashes. */ /* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) { && qca->memdump_state == QCA_MEMDUMP_IDLE) {
......
...@@ -331,6 +331,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) ...@@ -331,6 +331,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
if (opcode & 0x80) if (opcode & 0x80)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
if (hci_register_dev(hdev) < 0) { if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device"); BT_ERR("Can't register HCI device");
hci_free_dev(hdev); hci_free_dev(hdev);
......
...@@ -2376,6 +2376,8 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) ...@@ -2376,6 +2376,8 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
} }
void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta); void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
......
...@@ -390,6 +390,11 @@ struct hci_ctrl { ...@@ -390,6 +390,11 @@ struct hci_ctrl {
}; };
}; };
/* Per-skb MGMT metadata kept in bt_skb_cb (see the bt_skb_cb union):
 * the originating controller and the MGMT event opcode.
 */
struct mgmt_ctrl {
	struct hci_dev *hdev;	/* controller this mgmt skb belongs to */
	u16 opcode;		/* MGMT event opcode carried by the skb */
};
struct bt_skb_cb { struct bt_skb_cb {
u8 pkt_type; u8 pkt_type;
u8 force_active; u8 force_active;
...@@ -399,6 +404,7 @@ struct bt_skb_cb { ...@@ -399,6 +404,7 @@ struct bt_skb_cb {
struct l2cap_ctrl l2cap; struct l2cap_ctrl l2cap;
struct sco_ctrl sco; struct sco_ctrl sco;
struct hci_ctrl hci; struct hci_ctrl hci;
struct mgmt_ctrl mgmt;
}; };
}; };
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
...@@ -406,6 +412,7 @@ struct bt_skb_cb { ...@@ -406,6 +412,7 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_expect(skb) bt_cb((skb))->expect #define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode #define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
#define hci_skb_sk(skb) bt_cb((skb))->hci.sk #define hci_skb_sk(skb) bt_cb((skb))->hci.sk
static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
......
...@@ -246,6 +246,15 @@ enum { ...@@ -246,6 +246,15 @@ enum {
* HCI after resume. * HCI after resume.
*/ */
HCI_QUIRK_NO_SUSPEND_NOTIFIER, HCI_QUIRK_NO_SUSPEND_NOTIFIER,
/*
* When this quirk is set, LE tx power is not queried on startup
* and the min/max tx power values default to HCI_TX_POWER_INVALID.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
}; };
/* HCI device flags */ /* HCI device flags */
...@@ -332,6 +341,7 @@ enum { ...@@ -332,6 +341,7 @@ enum {
HCI_FORCE_NO_MITM, HCI_FORCE_NO_MITM,
HCI_QUALITY_REPORT, HCI_QUALITY_REPORT,
HCI_OFFLOAD_CODECS_ENABLED, HCI_OFFLOAD_CODECS_ENABLED,
HCI_LE_SIMULTANEOUS_ROLES,
__HCI_NUM_FLAGS, __HCI_NUM_FLAGS,
}; };
...@@ -1047,8 +1057,8 @@ struct hci_cp_read_stored_link_key { ...@@ -1047,8 +1057,8 @@ struct hci_cp_read_stored_link_key {
} __packed; } __packed;
struct hci_rp_read_stored_link_key { struct hci_rp_read_stored_link_key {
__u8 status; __u8 status;
__u8 max_keys; __le16 max_keys;
__u8 num_keys; __le16 num_keys;
} __packed; } __packed;
#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12 #define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
...@@ -1058,7 +1068,7 @@ struct hci_cp_delete_stored_link_key { ...@@ -1058,7 +1068,7 @@ struct hci_cp_delete_stored_link_key {
} __packed; } __packed;
struct hci_rp_delete_stored_link_key { struct hci_rp_delete_stored_link_key {
__u8 status; __u8 status;
__u8 num_keys; __le16 num_keys;
} __packed; } __packed;
#define HCI_MAX_NAME_LENGTH 248 #define HCI_MAX_NAME_LENGTH 248
...@@ -1931,6 +1941,16 @@ struct hci_rp_le_read_transmit_power { ...@@ -1931,6 +1941,16 @@ struct hci_rp_le_read_transmit_power {
__s8 max_le_tx_power; __s8 max_le_tx_power;
} __packed; } __packed;
/* LE privacy modes used with HCI_OP_LE_SET_PRIVACY_MODE */
#define HCI_NETWORK_PRIVACY		0x00
#define HCI_DEVICE_PRIVACY		0x01

#define HCI_OP_LE_SET_PRIVACY_MODE	0x204e
/* Command parameters for HCI LE Set Privacy Mode */
struct hci_cp_le_set_privacy_mode {
	__u8  bdaddr_type;	/* peer identity address type */
	bdaddr_t bdaddr;	/* peer identity address */
	__u8  mode;		/* HCI_NETWORK_PRIVACY or HCI_DEVICE_PRIVACY */
} __packed;
#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060 #define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
struct hci_rp_le_read_buffer_size_v2 { struct hci_rp_le_read_buffer_size_v2 {
__u8 status; __u8 status;
...@@ -2012,6 +2032,10 @@ struct hci_cp_le_reject_cis { ...@@ -2012,6 +2032,10 @@ struct hci_cp_le_reject_cis {
} __packed; } __packed;
/* ---- HCI Events ---- */ /* ---- HCI Events ---- */
/* Generic event payload for HCI events that carry only a status byte */
struct hci_ev_status {
	__u8 status;
} __packed;
#define HCI_EV_INQUIRY_COMPLETE 0x01 #define HCI_EV_INQUIRY_COMPLETE 0x01
#define HCI_EV_INQUIRY_RESULT 0x02 #define HCI_EV_INQUIRY_RESULT 0x02
...@@ -2024,6 +2048,11 @@ struct inquiry_info { ...@@ -2024,6 +2048,11 @@ struct inquiry_info {
__le16 clock_offset; __le16 clock_offset;
} __packed; } __packed;
struct hci_ev_inquiry_result {
__u8 num;
struct inquiry_info info[];
};
#define HCI_EV_CONN_COMPLETE 0x03 #define HCI_EV_CONN_COMPLETE 0x03
struct hci_ev_conn_complete { struct hci_ev_conn_complete {
__u8 status; __u8 status;
...@@ -2135,7 +2164,7 @@ struct hci_comp_pkts_info { ...@@ -2135,7 +2164,7 @@ struct hci_comp_pkts_info {
} __packed; } __packed;
struct hci_ev_num_comp_pkts { struct hci_ev_num_comp_pkts {
__u8 num_hndl; __u8 num;
struct hci_comp_pkts_info handles[]; struct hci_comp_pkts_info handles[];
} __packed; } __packed;
...@@ -2185,7 +2214,7 @@ struct hci_ev_pscan_rep_mode { ...@@ -2185,7 +2214,7 @@ struct hci_ev_pscan_rep_mode {
} __packed; } __packed;
#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
struct inquiry_info_with_rssi { struct inquiry_info_rssi {
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 pscan_rep_mode; __u8 pscan_rep_mode;
__u8 pscan_period_mode; __u8 pscan_period_mode;
...@@ -2193,7 +2222,7 @@ struct inquiry_info_with_rssi { ...@@ -2193,7 +2222,7 @@ struct inquiry_info_with_rssi {
__le16 clock_offset; __le16 clock_offset;
__s8 rssi; __s8 rssi;
} __packed; } __packed;
struct inquiry_info_with_rssi_and_pscan_mode { struct inquiry_info_rssi_pscan {
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 pscan_rep_mode; __u8 pscan_rep_mode;
__u8 pscan_period_mode; __u8 pscan_period_mode;
...@@ -2202,6 +2231,14 @@ struct inquiry_info_with_rssi_and_pscan_mode { ...@@ -2202,6 +2231,14 @@ struct inquiry_info_with_rssi_and_pscan_mode {
__le16 clock_offset; __le16 clock_offset;
__s8 rssi; __s8 rssi;
} __packed; } __packed;
struct hci_ev_inquiry_result_rssi {
__u8 num;
struct inquiry_info_rssi info[];
} __packed;
struct hci_ev_inquiry_result_rssi_pscan {
__u8 num;
struct inquiry_info_rssi_pscan info[];
} __packed;
#define HCI_EV_REMOTE_EXT_FEATURES 0x23 #define HCI_EV_REMOTE_EXT_FEATURES 0x23
struct hci_ev_remote_ext_features { struct hci_ev_remote_ext_features {
...@@ -2256,6 +2293,11 @@ struct extended_inquiry_info { ...@@ -2256,6 +2293,11 @@ struct extended_inquiry_info {
__u8 data[240]; __u8 data[240];
} __packed; } __packed;
struct hci_ev_ext_inquiry_result {
__u8 num;
struct extended_inquiry_info info[];
} __packed;
#define HCI_EV_KEY_REFRESH_COMPLETE 0x30 #define HCI_EV_KEY_REFRESH_COMPLETE 0x30
struct hci_ev_key_refresh_complete { struct hci_ev_key_refresh_complete {
__u8 status; __u8 status;
...@@ -2423,13 +2465,18 @@ struct hci_ev_le_conn_complete { ...@@ -2423,13 +2465,18 @@ struct hci_ev_le_conn_complete {
#define HCI_EV_LE_ADVERTISING_REPORT 0x02 #define HCI_EV_LE_ADVERTISING_REPORT 0x02
struct hci_ev_le_advertising_info { struct hci_ev_le_advertising_info {
__u8 evt_type; __u8 type;
__u8 bdaddr_type; __u8 bdaddr_type;
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 length; __u8 length;
__u8 data[]; __u8 data[];
} __packed; } __packed;
struct hci_ev_le_advertising_report {
__u8 num;
struct hci_ev_le_advertising_info info[];
} __packed;
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
struct hci_ev_le_conn_update_complete { struct hci_ev_le_conn_update_complete {
__u8 status; __u8 status;
...@@ -2473,7 +2520,7 @@ struct hci_ev_le_data_len_change { ...@@ -2473,7 +2520,7 @@ struct hci_ev_le_data_len_change {
#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B #define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
struct hci_ev_le_direct_adv_info { struct hci_ev_le_direct_adv_info {
__u8 evt_type; __u8 type;
__u8 bdaddr_type; __u8 bdaddr_type;
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 direct_addr_type; __u8 direct_addr_type;
...@@ -2481,6 +2528,11 @@ struct hci_ev_le_direct_adv_info { ...@@ -2481,6 +2528,11 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi; __s8 rssi;
} __packed; } __packed;
struct hci_ev_le_direct_adv_report {
__u8 num;
struct hci_ev_le_direct_adv_info info[];
} __packed;
#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c #define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
struct hci_ev_le_phy_update_complete { struct hci_ev_le_phy_update_complete {
__u8 status; __u8 status;
...@@ -2490,8 +2542,8 @@ struct hci_ev_le_phy_update_complete { ...@@ -2490,8 +2542,8 @@ struct hci_ev_le_phy_update_complete {
} __packed; } __packed;
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d #define HCI_EV_LE_EXT_ADV_REPORT 0x0d
struct hci_ev_le_ext_adv_report { struct hci_ev_le_ext_adv_info {
__le16 evt_type; __le16 type;
__u8 bdaddr_type; __u8 bdaddr_type;
bdaddr_t bdaddr; bdaddr_t bdaddr;
__u8 primary_phy; __u8 primary_phy;
...@@ -2499,11 +2551,16 @@ struct hci_ev_le_ext_adv_report { ...@@ -2499,11 +2551,16 @@ struct hci_ev_le_ext_adv_report {
__u8 sid; __u8 sid;
__u8 tx_power; __u8 tx_power;
__s8 rssi; __s8 rssi;
__le16 interval; __le16 interval;
__u8 direct_addr_type; __u8 direct_addr_type;
bdaddr_t direct_addr; bdaddr_t direct_addr;
__u8 length; __u8 length;
__u8 data[]; __u8 data[];
} __packed;
struct hci_ev_le_ext_adv_report {
__u8 num;
struct hci_ev_le_ext_adv_info info[];
} __packed; } __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
......
...@@ -88,6 +88,7 @@ struct discovery_state { ...@@ -88,6 +88,7 @@ struct discovery_state {
u8 (*uuids)[16]; u8 (*uuids)[16];
unsigned long scan_start; unsigned long scan_start;
unsigned long scan_duration; unsigned long scan_duration;
unsigned long name_resolve_timeout;
}; };
#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
...@@ -151,22 +152,22 @@ struct bdaddr_list_with_irk { ...@@ -151,22 +152,22 @@ struct bdaddr_list_with_irk {
u8 local_irk[16]; u8 local_irk[16];
}; };
struct bdaddr_list_with_flags {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
u32 current_flags;
};
enum hci_conn_flags { enum hci_conn_flags {
HCI_CONN_FLAG_REMOTE_WAKEUP, HCI_CONN_FLAG_REMOTE_WAKEUP,
HCI_CONN_FLAG_MAX HCI_CONN_FLAG_DEVICE_PRIVACY,
};
#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr)) __HCI_CONN_NUM_FLAGS,
};
/* Make sure number of flags doesn't exceed sizeof(current_flags) */ /* Make sure number of flags doesn't exceed sizeof(current_flags) */
static_assert(HCI_CONN_FLAG_MAX < 32); static_assert(__HCI_CONN_NUM_FLAGS < 32);
struct bdaddr_list_with_flags {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
};
struct bt_uuid { struct bt_uuid {
struct list_head list; struct list_head list;
...@@ -352,8 +353,8 @@ struct hci_dev { ...@@ -352,8 +353,8 @@ struct hci_dev {
__u16 lmp_subver; __u16 lmp_subver;
__u16 voice_setting; __u16 voice_setting;
__u8 num_iac; __u8 num_iac;
__u8 stored_max_keys; __u16 stored_max_keys;
__u8 stored_num_keys; __u16 stored_num_keys;
__u8 io_capability; __u8 io_capability;
__s8 inq_tx_power; __s8 inq_tx_power;
__u8 err_data_reporting; __u8 err_data_reporting;
...@@ -479,6 +480,7 @@ struct hci_dev { ...@@ -479,6 +480,7 @@ struct hci_dev {
struct work_struct cmd_sync_work; struct work_struct cmd_sync_work;
struct list_head cmd_sync_work_list; struct list_head cmd_sync_work_list;
struct mutex cmd_sync_work_lock; struct mutex cmd_sync_work_lock;
struct work_struct cmd_sync_cancel_work;
__u16 discov_timeout; __u16 discov_timeout;
struct delayed_work discov_off; struct delayed_work discov_off;
...@@ -559,6 +561,7 @@ struct hci_dev { ...@@ -559,6 +561,7 @@ struct hci_dev {
struct rfkill *rfkill; struct rfkill *rfkill;
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
DECLARE_BITMAP(conn_flags, __HCI_CONN_NUM_FLAGS);
__s8 adv_tx_power; __s8 adv_tx_power;
__u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
...@@ -754,7 +757,8 @@ struct hci_conn_params { ...@@ -754,7 +757,8 @@ struct hci_conn_params {
struct hci_conn *conn; struct hci_conn *conn;
bool explicit_connect; bool explicit_connect;
u32 current_flags; DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
u8 privacy_mode;
}; };
extern struct list_head hci_dev_list; extern struct list_head hci_dev_list;
...@@ -779,6 +783,12 @@ extern struct mutex hci_cb_list_lock; ...@@ -779,6 +783,12 @@ extern struct mutex hci_cb_list_lock;
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0) } while (0)
#define hci_dev_le_state_simultaneous(hdev) \
(test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
(hdev->le_states[4] & 0x08) && /* Central */ \
(hdev->le_states[4] & 0x40) && /* Peripheral */ \
(hdev->le_states[3] & 0x10)) /* Simultaneous */
/* ----- HCI interface to upper protocols ----- */ /* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon); int l2cap_disconn_ind(struct hci_conn *hcon);
...@@ -1117,8 +1127,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, ...@@ -1117,8 +1127,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
enum conn_reasons conn_reason); enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level, u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role, u16 conn_timeout, u8 role);
bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type, u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason); enum conn_reasons conn_reason);
...@@ -1465,6 +1474,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); ...@@ -1465,6 +1474,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \ #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY)) hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
(hdev->commands[39] & 0x04))
/* Use enhanced synchronous connection if command is supported */ /* Use enhanced synchronous connection if command is supported */
#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08) #define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
...@@ -1759,6 +1771,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); ...@@ -1759,6 +1771,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
void mgmt_fill_version_info(void *ver); void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev); int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev);
......
...@@ -37,6 +37,8 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, ...@@ -37,6 +37,8 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
void hci_cmd_sync_init(struct hci_dev *hdev); void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev); void hci_cmd_sync_clear(struct hci_dev *hdev);
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy); void *data, hci_cmd_sync_work_destroy_t destroy);
...@@ -100,3 +102,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev); ...@@ -100,3 +102,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev);
int hci_suspend_sync(struct hci_dev *hdev); int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev); int hci_resume_sync(struct hci_dev *hdev);
struct hci_conn;
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
...@@ -936,10 +936,11 @@ struct mgmt_ev_auth_failed { ...@@ -936,10 +936,11 @@ struct mgmt_ev_auth_failed {
__u8 status; __u8 status;
} __packed; } __packed;
#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01 #define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02 #define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04 #define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
#define MGMT_DEV_FOUND_INITIATED_CONN 0x08 #define MGMT_DEV_FOUND_INITIATED_CONN 0x08
#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED 0x10
#define MGMT_EV_DEVICE_FOUND 0x0012 #define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found { struct mgmt_ev_device_found {
......
...@@ -911,267 +911,45 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) ...@@ -911,267 +911,45 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
hci_enable_advertising(hdev); hci_enable_advertising(hdev);
} }
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{ {
struct hci_conn *conn; struct hci_conn *conn = data;
hci_dev_lock(hdev); hci_dev_lock(hdev);
conn = hci_lookup_le_connect(hdev); if (!err) {
if (hdev->adv_instance_cnt)
hci_req_resume_adv_instances(hdev);
if (!status) {
hci_connect_le_scan_cleanup(conn); hci_connect_le_scan_cleanup(conn);
goto done; goto done;
} }
bt_dev_err(hdev, "request failed to create LE connection: " bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
"status 0x%2.2x", status);
if (!conn) if (!conn)
goto done; goto done;
hci_le_conn_failed(conn, status); hci_le_conn_failed(conn, err);
done: done:
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static bool conn_use_rpa(struct hci_conn *conn) static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
struct hci_dev *hdev = conn->hdev;
return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void set_ext_conn_params(struct hci_conn *conn,
struct hci_cp_le_ext_conn_param *p)
{
struct hci_dev *hdev = conn->hdev;
memset(p, 0, sizeof(*p));
p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p->conn_latency = cpu_to_le16(conn->le_conn_latency);
p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
p->min_ce_len = cpu_to_le16(0x0000);
p->max_ce_len = cpu_to_le16(0x0000);
}
static void hci_req_add_le_create_conn(struct hci_request *req,
struct hci_conn *conn,
bdaddr_t *direct_rpa)
{
struct hci_dev *hdev = conn->hdev;
u8 own_addr_type;
/* If direct address was provided we use it instead of current
* address.
*/
if (direct_rpa) {
if (bacmp(&req->hdev->random_addr, direct_rpa))
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
direct_rpa);
/* direct address is always RPA */
own_addr_type = ADDR_LE_DEV_RANDOM;
} else {
/* Update random address, but set require_privacy to false so
* that we never connect with an non-resolvable address.
*/
if (hci_update_random_address(req, false, conn_use_rpa(conn),
&own_addr_type))
return;
}
if (use_ext_conn(hdev)) {
struct hci_cp_le_ext_create_conn *cp;
struct hci_cp_le_ext_conn_param *p;
u8 data[sizeof(*cp) + sizeof(*p) * 3];
u32 plen;
cp = (void *) data;
p = (void *) cp->data;
memset(cp, 0, sizeof(*cp));
bacpy(&cp->peer_addr, &conn->dst);
cp->peer_addr_type = conn->dst_type;
cp->own_addr_type = own_addr_type;
plen = sizeof(*cp);
if (scan_1m(hdev)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_2m(hdev)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_coded(hdev)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);
plen += sizeof(*p);
}
hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
} else {
struct hci_cp_le_create_conn cp;
memset(&cp, 0, sizeof(cp));
cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
conn->state = BT_CONNECT;
clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *conn)
{ {
struct hci_dev *hdev = req->hdev; struct hci_conn *conn = data;
u8 own_addr_type;
u8 enable;
if (ext_adv_capable(hdev)) {
struct hci_cp_le_set_ext_adv_params cp;
bdaddr_t random_addr;
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
&own_addr_type, &random_addr) < 0)
return;
memset(&cp, 0, sizeof(cp));
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
cp.tx_power = HCI_TX_POWER_INVALID;
cp.primary_phy = HCI_ADV_PHY_1M;
cp.secondary_phy = HCI_ADV_PHY_1M;
cp.handle = 0; /* Use instance 0 for directed adv */
cp.own_addr_type = own_addr_type;
cp.peer_addr_type = conn->dst_type;
bacpy(&cp.peer_addr, &conn->dst);
/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
* advertising_event_property LE_LEGACY_ADV_DIRECT_IND
* does not supports advertising data when the advertising set already
* contains some, the controller shall return erroc code 'Invalid
* HCI Command Parameters(0x12).
* So it is required to remove adv set for handle 0x00. since we use
* instance 0 for directed adv.
*/
__hci_req_remove_ext_adv_instance(req, cp.handle);
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
if (own_addr_type == ADDR_LE_DEV_RANDOM && bt_dev_dbg(hdev, "conn %p", conn);
bacmp(&random_addr, BDADDR_ANY) &&
bacmp(&random_addr, &hdev->random_addr)) {
struct hci_cp_le_set_adv_set_rand_addr cp;
memset(&cp, 0, sizeof(cp));
cp.handle = 0;
bacpy(&cp.bdaddr, &random_addr);
hci_req_add(req,
HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
sizeof(cp), &cp);
}
__hci_req_enable_ext_advertising(req, 0x00);
} else {
struct hci_cp_le_set_adv_param cp;
/* Clear the HCI_LE_ADV bit temporarily so that the return hci_le_create_conn_sync(hdev, conn);
* hci_update_random_address knows that it's safe to go ahead
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
if (hci_update_random_address(req, false, conn_use_rpa(conn),
&own_addr_type) < 0)
return;
memset(&cp, 0, sizeof(cp));
/* Some controllers might reject command if intervals are not
* within range for undirected advertising.
* BCM20702A0 is known to be affected by this.
*/
cp.min_interval = cpu_to_le16(0x0020);
cp.max_interval = cpu_to_le16(0x0020);
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
bacpy(&cp.direct_addr, &conn->dst);
cp.channel_map = hdev->le_adv_channel_map;
hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
}
conn->state = BT_CONNECT;
} }
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level, u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role, bdaddr_t *direct_rpa) u16 conn_timeout, u8 role)
{ {
struct hci_conn_params *params;
struct hci_conn *conn; struct hci_conn *conn;
struct smp_irk *irk; struct smp_irk *irk;
struct hci_request req;
int err; int err;
/* This ensures that during disable le_scan address resolution
* will not be disabled if it is followed by le_create_conn
*/
bool rpa_le_conn = true;
/* Let's make sure that le is enabled.*/ /* Let's make sure that le is enabled.*/
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (lmp_le_capable(hdev)) if (lmp_le_capable(hdev))
...@@ -1230,68 +1008,13 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, ...@@ -1230,68 +1008,13 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW; conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout; conn->conn_timeout = conn_timeout;
hci_req_init(&req, hdev); conn->state = BT_CONNECT;
clear_bit(HCI_CONN_SCANNING, &conn->flags);
/* Disable advertising if we're active. For central role
* connections most controllers will refuse to connect if
* advertising is enabled, and for peripheral role connections we
* anyway have to disable it in order to start directed
* advertising. Any registered advertisements will be
* re-enabled after the connection attempt is finished.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_pause_adv_instances(&req);
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
/* If we're active scanning most controllers are unable
* to initiate advertising. Simply reject the attempt.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE) {
hci_req_purge(&req);
hci_conn_del(conn);
return ERR_PTR(-EBUSY);
}
hci_req_directed_advertising(&req, conn);
goto create_conn;
}
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params) {
conn->le_conn_min_interval = params->conn_min_interval;
conn->le_conn_max_interval = params->conn_max_interval;
conn->le_conn_latency = params->conn_latency;
conn->le_supv_timeout = params->supervision_timeout;
} else {
conn->le_conn_min_interval = hdev->le_conn_min_interval;
conn->le_conn_max_interval = hdev->le_conn_max_interval;
conn->le_conn_latency = hdev->le_conn_latency;
conn->le_supv_timeout = hdev->le_supv_timeout;
}
/* If controller is scanning, we stop it since some controllers are
* not able to scan and connect at the same time. Also set the
* HCI_LE_SCAN_INTERRUPTED flag so that the command complete
* handler for scan disabling knows to set the correct discovery
* state.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(&req, rpa_le_conn);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
hci_req_add_le_create_conn(&req, conn, direct_rpa);
create_conn: err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
err = hci_req_run(&req, create_le_conn_complete); create_le_conn_complete);
if (err) { if (err) {
hci_conn_del(conn); hci_conn_del(conn);
if (hdev->adv_instance_cnt)
hci_req_resume_adv_instances(hdev);
return ERR_PTR(err); return ERR_PTR(err);
} }
......
...@@ -2153,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, ...@@ -2153,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
bacpy(&entry->bdaddr, bdaddr); bacpy(&entry->bdaddr, bdaddr);
entry->bdaddr_type = type; entry->bdaddr_type = type;
entry->current_flags = flags; bitmap_from_u64(entry->flags, flags);
list_add(&entry->list, list); list_add(&entry->list, list);
...@@ -2629,6 +2629,12 @@ int hci_register_dev(struct hci_dev *hdev) ...@@ -2629,6 +2629,12 @@ int hci_register_dev(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
hci_dev_set_flag(hdev, HCI_UNCONFIGURED); hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* Mark Remote Wakeup connection flag as supported if driver has wakeup
* callback.
*/
if (hdev->wakeup)
set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
hci_sock_dev_event(hdev, HCI_DEV_REG); hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev); hci_dev_hold(hdev);
...@@ -2906,7 +2912,7 @@ int hci_unregister_cb(struct hci_cb *cb) ...@@ -2906,7 +2912,7 @@ int hci_unregister_cb(struct hci_cb *cb)
} }
EXPORT_SYMBOL(hci_unregister_cb); EXPORT_SYMBOL(hci_unregister_cb);
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{ {
int err; int err;
...@@ -2929,14 +2935,17 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) ...@@ -2929,14 +2935,17 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags)) { if (!test_bit(HCI_RUNNING, &hdev->flags)) {
kfree_skb(skb); kfree_skb(skb);
return; return -EINVAL;
} }
err = hdev->send(hdev, skb); err = hdev->send(hdev, skb);
if (err < 0) { if (err < 0) {
bt_dev_err(hdev, "sending frame failed (%d)", err); bt_dev_err(hdev, "sending frame failed (%d)", err);
kfree_skb(skb); kfree_skb(skb);
return err;
} }
return 0;
} }
/* Send HCI command */ /* Send HCI command */
...@@ -3843,10 +3852,15 @@ static void hci_cmd_work(struct work_struct *work) ...@@ -3843,10 +3852,15 @@ static void hci_cmd_work(struct work_struct *work)
hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
if (hdev->sent_cmd) { if (hdev->sent_cmd) {
int res;
if (hci_req_status_pend(hdev)) if (hci_req_status_pend(hdev))
hci_dev_set_flag(hdev, HCI_CMD_PENDING); hci_dev_set_flag(hdev, HCI_CMD_PENDING);
atomic_dec(&hdev->cmd_cnt); atomic_dec(&hdev->cmd_cnt);
hci_send_frame(hdev, skb);
res = hci_send_frame(hdev, skb);
if (res < 0)
__hci_cmd_sync_cancel(hdev, -res);
if (test_bit(HCI_RESET, &hdev->flags)) if (test_bit(HCI_RESET, &hdev->flags))
cancel_delayed_work(&hdev->cmd_timer); cancel_delayed_work(&hdev->cmd_timer);
else else
......
此差异已折叠。
...@@ -111,17 +111,6 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, ...@@ -111,17 +111,6 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
} }
} }
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
wake_up_interruptible(&hdev->req_wait_q);
}
}
/* Execute request and wait for completion. */ /* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt), unsigned long opt),
...@@ -492,8 +481,8 @@ static int add_to_accept_list(struct hci_request *req, ...@@ -492,8 +481,8 @@ static int add_to_accept_list(struct hci_request *req,
} }
/* During suspend, only wakeable devices can be in accept list */ /* During suspend, only wakeable devices can be in accept list */
if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, if (hdev->suspended &&
params->current_flags)) !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
return 0; return 0;
*num_entries += 1; *num_entries += 1;
...@@ -829,56 +818,6 @@ static void cancel_adv_timeout(struct hci_dev *hdev) ...@@ -829,56 +818,6 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
} }
} }
/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
bt_dev_dbg(req->hdev, "Pausing advertising instances");
/* Call to disable any advertisements active on the controller.
* This will succeed even if no advertisements are configured.
*/
__hci_req_disable_advertising(req);
/* If we are using software rotation, pause the loop */
if (!ext_adv_capable(req->hdev))
cancel_adv_timeout(req->hdev);
}
/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
struct adv_info *adv;
bt_dev_dbg(req->hdev, "Resuming advertising instances");
if (ext_adv_capable(req->hdev)) {
/* Call for each tracked instance to be re-enabled */
list_for_each_entry(adv, &req->hdev->adv_instances, list) {
__hci_req_enable_ext_advertising(req,
adv->instance);
}
} else {
/* Schedule for most recent instance to be restarted and begin
* the software rotation loop
*/
__hci_req_schedule_adv_instance(req,
req->hdev->cur_adv_instance,
true);
}
}
/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
struct hci_request req;
hci_req_init(&req, hdev);
__hci_req_resume_adv_instances(&req);
return hci_req_run(&req, NULL);
}
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{ {
return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
...@@ -2703,7 +2642,7 @@ void hci_request_setup(struct hci_dev *hdev) ...@@ -2703,7 +2642,7 @@ void hci_request_setup(struct hci_dev *hdev)
void hci_request_cancel_all(struct hci_dev *hdev) void hci_request_cancel_all(struct hci_dev *hdev)
{ {
hci_req_sync_cancel(hdev, ENODEV); __hci_cmd_sync_cancel(hdev, ENODEV);
cancel_work_sync(&hdev->discov_update); cancel_work_sync(&hdev->discov_update);
cancel_work_sync(&hdev->scan_update); cancel_work_sync(&hdev->scan_update);
......
...@@ -64,7 +64,6 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, ...@@ -64,7 +64,6 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt), unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status); unsigned long opt, u32 timeout, u8 *hci_status);
void hci_req_sync_cancel(struct hci_dev *hdev, int err);
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param); const void *param);
...@@ -81,8 +80,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req); ...@@ -81,8 +80,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
void hci_req_disable_address_resolution(struct hci_dev *hdev); void hci_req_disable_address_resolution(struct hci_dev *hdev);
void __hci_req_pause_adv_instances(struct hci_request *req);
int hci_req_resume_adv_instances(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev); void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req);
......
...@@ -103,7 +103,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, ...@@ -103,7 +103,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
if (skb_queue_empty(&req->cmd_q)) if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->hci.req_flags |= HCI_REQ_START; bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
bt_cb(skb)->hci.req_event = event; hci_skb_event(skb) = event;
skb_queue_tail(&req->cmd_q, skb); skb_queue_tail(&req->cmd_q, skb);
} }
...@@ -313,11 +313,24 @@ static void hci_cmd_sync_work(struct work_struct *work) ...@@ -313,11 +313,24 @@ static void hci_cmd_sync_work(struct work_struct *work)
} }
} }
static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
cancel_delayed_work_sync(&hdev->cmd_timer);
cancel_delayed_work_sync(&hdev->ncmd_timer);
atomic_set(&hdev->cmd_cnt, 1);
wake_up_interruptible(&hdev->req_wait_q);
}
void hci_cmd_sync_init(struct hci_dev *hdev) void hci_cmd_sync_init(struct hci_dev *hdev)
{ {
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
INIT_LIST_HEAD(&hdev->cmd_sync_work_list); INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
mutex_init(&hdev->cmd_sync_work_lock); mutex_init(&hdev->cmd_sync_work_lock);
INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
} }
void hci_cmd_sync_clear(struct hci_dev *hdev) void hci_cmd_sync_clear(struct hci_dev *hdev)
...@@ -335,6 +348,35 @@ void hci_cmd_sync_clear(struct hci_dev *hdev) ...@@ -335,6 +348,35 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
} }
} }
void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
cancel_delayed_work_sync(&hdev->cmd_timer);
cancel_delayed_work_sync(&hdev->ncmd_timer);
atomic_set(&hdev->cmd_cnt, 1);
wake_up_interruptible(&hdev->req_wait_q);
}
}
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy) void *data, hci_cmd_sync_work_destroy_t destroy)
{ {
...@@ -1580,8 +1622,40 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, ...@@ -1580,8 +1622,40 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
sizeof(cp), &cp, HCI_CMD_TIMEOUT); sizeof(cp), &cp, HCI_CMD_TIMEOUT);
} }
/* Set Device Privacy Mode. */
static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
struct hci_conn_params *params)
{
struct hci_cp_le_set_privacy_mode cp;
struct smp_irk *irk;
/* If device privacy mode has already been set there is nothing to do */
if (params->privacy_mode == HCI_DEVICE_PRIVACY)
return 0;
/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
* indicates that LL Privacy has been enabled and
* HCI_OP_LE_SET_PRIVACY_MODE is supported.
*/
if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags))
return 0;
irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
if (!irk)
return 0;
memset(&cp, 0, sizeof(cp));
cp.bdaddr_type = irk->addr_type;
bacpy(&cp.bdaddr, &irk->bdaddr);
cp.mode = HCI_DEVICE_PRIVACY;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
/* Adds connection to allow list if needed, if the device uses RPA (has IRK) /* Adds connection to allow list if needed, if the device uses RPA (has IRK)
* this attempts to program the device in the resolving list as well. * this attempts to program the device in the resolving list as well and
* properly set the privacy mode.
*/ */
static int hci_le_add_accept_list_sync(struct hci_dev *hdev, static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
struct hci_conn_params *params, struct hci_conn_params *params,
...@@ -1590,11 +1664,6 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, ...@@ -1590,11 +1664,6 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
struct hci_cp_le_add_to_accept_list cp; struct hci_cp_le_add_to_accept_list cp;
int err; int err;
/* Already in accept list */
if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
params->addr_type))
return 0;
/* Select filter policy to accept all advertising */ /* Select filter policy to accept all advertising */
if (*num_entries >= hdev->le_accept_list_size) if (*num_entries >= hdev->le_accept_list_size)
return -ENOSPC; return -ENOSPC;
...@@ -1606,8 +1675,8 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, ...@@ -1606,8 +1675,8 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
} }
/* During suspend, only wakeable devices can be in acceptlist */ /* During suspend, only wakeable devices can be in acceptlist */
if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, if (hdev->suspended &&
params->current_flags)) !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
return 0; return 0;
/* Attempt to program the device in the resolving list first to avoid /* Attempt to program the device in the resolving list first to avoid
...@@ -1620,6 +1689,18 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, ...@@ -1620,6 +1689,18 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
return err; return err;
} }
/* Set Privacy Mode */
err = hci_le_set_privacy_mode_sync(hdev, params);
if (err) {
bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
return err;
}
/* Check if already in accept list */
if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
params->addr_type))
return 0;
*num_entries += 1; *num_entries += 1;
cp.bdaddr_type = params->addr_type; cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr); bacpy(&cp.bdaddr, &params->addr);
...@@ -1645,10 +1726,8 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) ...@@ -1645,10 +1726,8 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev)
int err; int err;
int old_state; int old_state;
/* If there are no instances or advertising has already been paused /* If already been paused there is nothing to do. */
* there is nothing to do. if (hdev->advertising_paused)
*/
if (!hdev->adv_instance_cnt || hdev->advertising_paused)
return 0; return 0;
bt_dev_dbg(hdev, "Pausing directed advertising"); bt_dev_dbg(hdev, "Pausing directed advertising");
...@@ -3283,7 +3362,8 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) ...@@ -3283,7 +3362,8 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
/* Read LE Min/Max Tx Power*/ /* Read LE Min/Max Tx Power*/
static int hci_le_read_tx_power_sync(struct hci_dev *hdev) static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{ {
if (!(hdev->commands[38] & 0x80)) if (!(hdev->commands[38] & 0x80) ||
test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
return 0; return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
...@@ -4749,8 +4829,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev) ...@@ -4749,8 +4829,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
hci_clear_event_filter_sync(hdev); hci_clear_event_filter_sync(hdev);
list_for_each_entry(b, &hdev->accept_list, list) { list_for_each_entry(b, &hdev->accept_list, list) {
if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags))
b->current_flags))
continue; continue;
bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
...@@ -4920,3 +4999,283 @@ int hci_resume_sync(struct hci_dev *hdev) ...@@ -4920,3 +4999,283 @@ int hci_resume_sync(struct hci_dev *hdev)
return 0; return 0;
} }
static bool conn_use_rpa(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
struct hci_conn *conn)
{
struct hci_cp_le_set_ext_adv_params cp;
int err;
bdaddr_t random_addr;
u8 own_addr_type;
err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
&own_addr_type);
if (err)
return err;
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
&own_addr_type, &random_addr);
if (err)
return err;
memset(&cp, 0, sizeof(cp));
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
cp.tx_power = HCI_TX_POWER_INVALID;
cp.primary_phy = HCI_ADV_PHY_1M;
cp.secondary_phy = HCI_ADV_PHY_1M;
cp.handle = 0x00; /* Use instance 0 for directed adv */
cp.own_addr_type = own_addr_type;
cp.peer_addr_type = conn->dst_type;
bacpy(&cp.peer_addr, &conn->dst);
/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
* advertising_event_property LE_LEGACY_ADV_DIRECT_IND
* does not supports advertising data when the advertising set already
* contains some, the controller shall return erroc code 'Invalid
* HCI Command Parameters(0x12).
* So it is required to remove adv set for handle 0x00. since we use
* instance 0 for directed adv.
*/
err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
if (err)
return err;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
if (err)
return err;
/* Check if random address need to be updated */
if (own_addr_type == ADDR_LE_DEV_RANDOM &&
bacmp(&random_addr, BDADDR_ANY) &&
bacmp(&random_addr, &hdev->random_addr)) {
err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
&random_addr);
if (err)
return err;
}
return hci_enable_ext_advertising_sync(hdev, 0x00);
}
static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
struct hci_conn *conn)
{
struct hci_cp_le_set_adv_param cp;
u8 status;
u8 own_addr_type;
u8 enable;
if (ext_adv_capable(hdev))
return hci_le_ext_directed_advertising_sync(hdev, conn);
/* Clear the HCI_LE_ADV bit temporarily so that the
* hci_update_random_address knows that it's safe to go ahead
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
&own_addr_type);
if (status)
return status;
memset(&cp, 0, sizeof(cp));
/* Some controllers might reject command if intervals are not
* within range for undirected advertising.
* BCM20702A0 is known to be affected by this.
*/
cp.min_interval = cpu_to_le16(0x0020);
cp.max_interval = cpu_to_le16(0x0020);
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
bacpy(&cp.direct_addr, &conn->dst);
cp.channel_map = hdev->le_adv_channel_map;
status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
if (status)
return status;
enable = 0x01;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}
static void set_ext_conn_params(struct hci_conn *conn,
struct hci_cp_le_ext_conn_param *p)
{
struct hci_dev *hdev = conn->hdev;
memset(p, 0, sizeof(*p));
p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p->conn_latency = cpu_to_le16(conn->le_conn_latency);
p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
p->min_ce_len = cpu_to_le16(0x0000);
p->max_ce_len = cpu_to_le16(0x0000);
}
int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 own_addr_type)
{
struct hci_cp_le_ext_create_conn *cp;
struct hci_cp_le_ext_conn_param *p;
u8 data[sizeof(*cp) + sizeof(*p) * 3];
u32 plen;
cp = (void *)data;
p = (void *)cp->data;
memset(cp, 0, sizeof(*cp));
bacpy(&cp->peer_addr, &conn->dst);
cp->peer_addr_type = conn->dst_type;
cp->own_addr_type = own_addr_type;
plen = sizeof(*cp);
if (scan_1m(hdev)) {
cp->phys |= LE_SCAN_PHY_1M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_2m(hdev)) {
cp->phys |= LE_SCAN_PHY_2M;
set_ext_conn_params(conn, p);
p++;
plen += sizeof(*p);
}
if (scan_coded(hdev)) {
cp->phys |= LE_SCAN_PHY_CODED;
set_ext_conn_params(conn, p);
plen += sizeof(*p);
}
return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
plen, data,
HCI_EV_LE_ENHANCED_CONN_COMPLETE,
HCI_CMD_TIMEOUT, NULL);
}
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
struct hci_cp_le_create_conn cp;
struct hci_conn_params *params;
u8 own_addr_type;
int err;
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
/* If we're active scanning and simultaneous roles is not
* enabled simply reject the attempt.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE &&
!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
hci_conn_del(conn);
return -EBUSY;
}
/* Pause advertising while doing directed advertising. */
hci_pause_advertising_sync(hdev);
err = hci_le_directed_advertising_sync(hdev, conn);
goto done;
}
/* Disable advertising if simultaneous roles is not in use. */
if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
hci_pause_advertising_sync(hdev);
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params) {
conn->le_conn_min_interval = params->conn_min_interval;
conn->le_conn_max_interval = params->conn_max_interval;
conn->le_conn_latency = params->conn_latency;
conn->le_supv_timeout = params->supervision_timeout;
} else {
conn->le_conn_min_interval = hdev->le_conn_min_interval;
conn->le_conn_max_interval = hdev->le_conn_max_interval;
conn->le_conn_latency = hdev->le_conn_latency;
conn->le_supv_timeout = hdev->le_supv_timeout;
}
/* If controller is scanning, we stop it since some controllers are
* not able to scan and connect at the same time. Also set the
* HCI_LE_SCAN_INTERRUPTED flag so that the command complete
* handler for scan disabling knows to set the correct discovery
* state.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_scan_disable_sync(hdev);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
/* Update random address, but set require_privacy to false so
* that we never connect with an non-resolvable address.
*/
err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
&own_addr_type);
if (err)
goto done;
if (use_ext_conn(hdev)) {
err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
goto done;
}
memset(&cp, 0, sizeof(cp));
cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE,
HCI_CMD_TIMEOUT, NULL);
done:
/* Re-enable advertising after the connection attempt is finished. */
hci_resume_advertising_sync(hdev);
return err;
}
...@@ -7905,7 +7905,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, ...@@ -7905,7 +7905,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hcon = hci_connect_le(hdev, dst, dst_type, false, hcon = hci_connect_le(hdev, dst, dst_type, false,
chan->sec_level, chan->sec_level,
HCI_LE_CONN_TIMEOUT, HCI_LE_CONN_TIMEOUT,
HCI_ROLE_SLAVE, NULL); HCI_ROLE_SLAVE);
else else
hcon = hci_connect_le_scan(hdev, dst, dst_type, hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level, chan->sec_level,
......
...@@ -161,7 +161,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) ...@@ -161,7 +161,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
break; break;
} }
if (chan->psm && bdaddr_type_is_le(chan->src_type)) /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
* L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
*/
if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
chan->mode != L2CAP_MODE_EXT_FLOWCTL)
chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->mode = L2CAP_MODE_LE_FLOWCTL;
chan->state = BT_BOUND; chan->state = BT_BOUND;
...@@ -255,7 +259,11 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, ...@@ -255,7 +259,11 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
return -EINVAL; return -EINVAL;
} }
if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
* L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
*/
if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
chan->mode != L2CAP_MODE_EXT_FLOWCTL)
chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->mode = L2CAP_MODE_LE_FLOWCTL;
l2cap_sock_init_pid(sk); l2cap_sock_init_pid(sk);
......
...@@ -335,6 +335,12 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, ...@@ -335,6 +335,12 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
HCI_SOCK_TRUSTED, skip_sk); HCI_SOCK_TRUSTED, skip_sk);
} }
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
skip_sk);
}
static u8 le_addr_type(u8 mgmt_addr_type) static u8 le_addr_type(u8 mgmt_addr_type)
{ {
if (mgmt_addr_type == BDADDR_LE_PUBLIC) if (mgmt_addr_type == BDADDR_LE_PUBLIC)
...@@ -3876,7 +3882,7 @@ static const u8 offload_codecs_uuid[16] = { ...@@ -3876,7 +3882,7 @@ static const u8 offload_codecs_uuid[16] = {
}; };
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = { static const u8 le_simultaneous_roles_uuid[16] = {
0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
}; };
...@@ -3909,16 +3915,13 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, ...@@ -3909,16 +3915,13 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
} }
#endif #endif
if (hdev) { if (hdev && hci_dev_le_state_simultaneous(hdev)) {
if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
(hdev->le_states[4] & 0x08) && /* Central */
(hdev->le_states[4] & 0x40) && /* Peripheral */
(hdev->le_states[3] & 0x10)) /* Simultaneous */
flags = BIT(0); flags = BIT(0);
else else
flags = 0; flags = 0;
memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16); memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags); rp->features[idx].flags = cpu_to_le32(flags);
idx++; idx++;
} }
...@@ -3978,35 +3981,24 @@ static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, ...@@ -3978,35 +3981,24 @@ static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
memcpy(ev.uuid, rpa_resolution_uuid, 16); memcpy(ev.uuid, rpa_resolution_uuid, 16);
ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
if (enabled && privacy_mode_capable(hdev))
set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
else
clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev), &ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip); HCI_MGMT_EXP_FEATURE_EVENTS, skip);
} }
#ifdef CONFIG_BT_FEATURE_DEBUG static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
static int exp_debug_feature_changed(bool enabled, struct sock *skip) bool enabled, struct sock *skip)
{ {
struct mgmt_ev_exp_feature_changed ev; struct mgmt_ev_exp_feature_changed ev;
memset(&ev, 0, sizeof(ev)); memset(&ev, 0, sizeof(ev));
memcpy(ev.uuid, debug_uuid, 16); memcpy(ev.uuid, uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
static int exp_quality_report_feature_changed(bool enabled,
struct hci_dev *hdev,
struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
memset(&ev, 0, sizeof(ev));
memcpy(ev.uuid, quality_report_uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
...@@ -4036,17 +4028,18 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, ...@@ -4036,17 +4028,18 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
bt_dbg_set(false); bt_dbg_set(false);
if (changed) if (changed)
exp_debug_feature_changed(false, sk); exp_feature_changed(NULL, ZERO_KEY, false, sk);
} }
#endif #endif
if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); bool changed;
hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
changed = hci_dev_test_and_clear_flag(hdev,
HCI_ENABLE_LL_PRIVACY);
if (changed) if (changed)
exp_ll_privacy_feature_changed(false, hdev, sk); exp_feature_changed(hdev, rpa_resolution_uuid, false,
sk);
} }
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
...@@ -4097,7 +4090,7 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev, ...@@ -4097,7 +4090,7 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp)); &rp, sizeof(rp));
if (changed) if (changed)
exp_debug_feature_changed(val, sk); exp_feature_changed(hdev, debug_uuid, val, sk);
return err; return err;
} }
...@@ -4139,15 +4132,15 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, ...@@ -4139,15 +4132,15 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
val = !!cp->param[0]; val = !!cp->param[0];
if (val) { if (val) {
changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); changed = !hci_dev_test_and_set_flag(hdev,
hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); HCI_ENABLE_LL_PRIVACY);
hci_dev_clear_flag(hdev, HCI_ADVERTISING); hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Enable LL privacy + supported settings changed */ /* Enable LL privacy + supported settings changed */
flags = BIT(0) | BIT(1); flags = BIT(0) | BIT(1);
} else { } else {
changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); changed = hci_dev_test_and_clear_flag(hdev,
hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); HCI_ENABLE_LL_PRIVACY);
/* Disable LL privacy + supported settings changed */ /* Disable LL privacy + supported settings changed */
flags = BIT(1); flags = BIT(1);
...@@ -4235,27 +4228,13 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, ...@@ -4235,27 +4228,13 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp)); &rp, sizeof(rp));
if (changed) if (changed)
exp_quality_report_feature_changed(val, hdev, sk); exp_feature_changed(hdev, quality_report_uuid, val, sk);
unlock_quality_report: unlock_quality_report:
hci_req_sync_unlock(hdev); hci_req_sync_unlock(hdev);
return err; return err;
} }
static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev,
struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
memset(&ev, 0, sizeof(ev));
memcpy(ev.uuid, offload_codecs_uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_set_exp_feature *cp, struct mgmt_cp_set_exp_feature *cp,
u16 data_len) u16 data_len)
...@@ -4309,7 +4288,65 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, ...@@ -4309,7 +4288,65 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp)); &rp, sizeof(rp));
if (changed) if (changed)
exp_offload_codec_feature_changed(val, hdev, sk); exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
return err;
}
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_set_exp_feature *cp,
u16 data_len)
{
bool val, changed;
int err;
struct mgmt_rp_set_exp_feature rp;
/* Command requires to use a valid controller index */
if (!hdev)
return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_INDEX);
/* Parameters are limited to a single octet */
if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_PARAMS);
/* Only boolean on/off is supported */
if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_INVALID_PARAMS);
val = !!cp->param[0];
changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
if (!hci_dev_le_state_simultaneous(hdev)) {
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_NOT_SUPPORTED);
}
if (changed) {
if (val)
hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
else
hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
}
bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
val, changed);
memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
rp.flags = cpu_to_le32(val ? BIT(0) : 0);
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE, 0,
&rp, sizeof(rp));
if (changed)
exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
return err; return err;
} }
...@@ -4326,6 +4363,7 @@ static const struct mgmt_exp_feature { ...@@ -4326,6 +4363,7 @@ static const struct mgmt_exp_feature {
EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
EXP_FEAT(quality_report_uuid, set_quality_report_func), EXP_FEAT(quality_report_uuid, set_quality_report_func),
EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
/* end with a null feature */ /* end with a null feature */
EXP_FEAT(NULL, NULL) EXP_FEAT(NULL, NULL)
...@@ -4349,8 +4387,6 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, ...@@ -4349,8 +4387,6 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_NOT_SUPPORTED); MGMT_STATUS_NOT_SUPPORTED);
} }
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len) u16 data_len)
{ {
...@@ -4358,7 +4394,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4358,7 +4394,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_rp_get_device_flags rp; struct mgmt_rp_get_device_flags rp;
struct bdaddr_list_with_flags *br_params; struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params; struct hci_conn_params *params;
u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); u32 supported_flags;
u32 current_flags = 0; u32 current_flags = 0;
u8 status = MGMT_STATUS_INVALID_PARAMS; u8 status = MGMT_STATUS_INVALID_PARAMS;
...@@ -4367,6 +4403,9 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4367,6 +4403,9 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev); hci_dev_lock(hdev);
bitmap_to_arr32(&supported_flags, hdev->conn_flags,
__HCI_CONN_NUM_FLAGS);
memset(&rp, 0, sizeof(rp)); memset(&rp, 0, sizeof(rp));
if (cp->addr.type == BDADDR_BREDR) { if (cp->addr.type == BDADDR_BREDR) {
...@@ -4376,7 +4415,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4376,7 +4415,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
if (!br_params) if (!br_params)
goto done; goto done;
current_flags = br_params->current_flags; bitmap_to_arr32(&current_flags, br_params->flags,
__HCI_CONN_NUM_FLAGS);
} else { } else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type)); le_addr_type(cp->addr.type));
...@@ -4384,7 +4424,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4384,7 +4424,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
if (!params) if (!params)
goto done; goto done;
current_flags = params->current_flags; bitmap_to_arr32(&current_flags, params->flags,
__HCI_CONN_NUM_FLAGS);
} }
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
...@@ -4422,13 +4463,16 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4422,13 +4463,16 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
struct bdaddr_list_with_flags *br_params; struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params; struct hci_conn_params *params;
u8 status = MGMT_STATUS_INVALID_PARAMS; u8 status = MGMT_STATUS_INVALID_PARAMS;
u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); u32 supported_flags;
u32 current_flags = __le32_to_cpu(cp->current_flags); u32 current_flags = __le32_to_cpu(cp->current_flags);
bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
&cp->addr.bdaddr, cp->addr.type, &cp->addr.bdaddr, cp->addr.type,
__le32_to_cpu(current_flags)); __le32_to_cpu(current_flags));
bitmap_to_arr32(&supported_flags, hdev->conn_flags,
__HCI_CONN_NUM_FLAGS);
if ((supported_flags | current_flags) != supported_flags) { if ((supported_flags | current_flags) != supported_flags) {
bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
current_flags, supported_flags); current_flags, supported_flags);
...@@ -4443,7 +4487,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4443,7 +4487,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
cp->addr.type); cp->addr.type);
if (br_params) { if (br_params) {
br_params->current_flags = current_flags; bitmap_from_u64(br_params->flags, current_flags);
status = MGMT_STATUS_SUCCESS; status = MGMT_STATUS_SUCCESS;
} else { } else {
bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
...@@ -4453,8 +4497,15 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -4453,8 +4497,15 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type)); le_addr_type(cp->addr.type));
if (params) { if (params) {
params->current_flags = current_flags; bitmap_from_u64(params->flags, current_flags);
status = MGMT_STATUS_SUCCESS; status = MGMT_STATUS_SUCCESS;
/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
* has been set.
*/
if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
params->flags))
hci_update_passive_scan(hdev);
} else { } else {
bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
&cp->addr.bdaddr, &cp->addr.bdaddr,
...@@ -6979,6 +7030,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, ...@@ -6979,6 +7030,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
struct hci_conn_params *params; struct hci_conn_params *params;
int err; int err;
u32 current_flags = 0; u32 current_flags = 0;
u32 supported_flags;
bt_dev_dbg(hdev, "sock %p", sk); bt_dev_dbg(hdev, "sock %p", sk);
...@@ -7050,7 +7102,8 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, ...@@ -7050,7 +7102,8 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type); addr_type);
if (params) if (params)
current_flags = params->current_flags; bitmap_to_arr32(&current_flags, params->flags,
__HCI_CONN_NUM_FLAGS);
} }
err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
...@@ -7059,8 +7112,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, ...@@ -7059,8 +7112,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
added: added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
bitmap_to_arr32(&supported_flags, hdev->conn_flags,
__HCI_CONN_NUM_FLAGS);
device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
SUPPORTED_DEVICE_FLAGS(), current_flags); supported_flags, current_flags);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_SUCCESS, &cp->addr, MGMT_STATUS_SUCCESS, &cp->addr,
...@@ -8999,11 +9054,19 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, ...@@ -8999,11 +9054,19 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u8 *name, u8 name_len) u8 *name, u8 name_len)
{ {
char buf[512]; struct sk_buff *skb;
struct mgmt_ev_device_connected *ev = (void *) buf; struct mgmt_ev_device_connected *ev;
u16 eir_len = 0; u16 eir_len = 0;
u32 flags = 0; u32 flags = 0;
if (conn->le_adv_data_len > 0)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
conn->le_adv_data_len);
else
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
2 + name_len + 5);
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, &conn->dst); bacpy(&ev->addr.bdaddr, &conn->dst);
ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
...@@ -9017,24 +9080,26 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, ...@@ -9017,24 +9080,26 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
* adding any BR/EDR data to the LE adv. * adding any BR/EDR data to the LE adv.
*/ */
if (conn->le_adv_data_len > 0) { if (conn->le_adv_data_len > 0) {
memcpy(&ev->eir[eir_len], skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
conn->le_adv_data, conn->le_adv_data_len);
eir_len = conn->le_adv_data_len; eir_len = conn->le_adv_data_len;
} else { } else {
if (name_len > 0) if (name_len > 0) {
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
name, name_len); name, name_len);
skb_put(skb, eir_len);
}
if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
eir_len = eir_append_data(ev->eir, eir_len, eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, EIR_CLASS_OF_DEV,
conn->dev_class, 3); conn->dev_class, 3);
skb_put(skb, 5);
}
} }
ev->eir_len = cpu_to_le16(eir_len); ev->eir_len = cpu_to_le16(eir_len);
mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, mgmt_event_skb(skb, NULL);
sizeof(*ev) + eir_len, NULL);
} }
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
...@@ -9528,9 +9593,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, ...@@ -9528,9 +9593,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{ {
char buf[512]; struct sk_buff *skb;
struct mgmt_ev_device_found *ev = (void *)buf; struct mgmt_ev_device_found *ev;
size_t ev_size;
/* Don't send events for a non-kernel initiated discovery. With /* Don't send events for a non-kernel initiated discovery. With
* LE one exception is if we have pend_le_reports > 0 in which * LE one exception is if we have pend_le_reports > 0 in which
...@@ -9565,13 +9629,13 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, ...@@ -9565,13 +9629,13 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
} }
} }
/* Make sure that the buffer is big enough. The 5 extra bytes /* Allocate skb. The 5 extra bytes are for the potential CoD field */
* are for the potential CoD field. skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
*/ sizeof(*ev) + eir_len + scan_rsp_len + 5);
if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf)) if (!skb)
return; return;
memset(buf, 0, sizeof(buf)); ev = skb_put(skb, sizeof(*ev));
/* In case of device discovery with BR/EDR devices (pre 1.2), the /* In case of device discovery with BR/EDR devices (pre 1.2), the
* RSSI value was reported as 0 when not available. This behavior * RSSI value was reported as 0 when not available. This behavior
...@@ -9592,44 +9656,57 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, ...@@ -9592,44 +9656,57 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
if (eir_len > 0) if (eir_len > 0)
/* Copy EIR or advertising data into event */ /* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len); skb_put_data(skb, eir, eir_len);
if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
NULL)) u8 eir_cod[5];
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3); eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
dev_class, 3);
skb_put_data(skb, eir_cod, sizeof(eir_cod));
}
if (scan_rsp_len > 0) if (scan_rsp_len > 0)
/* Append scan response data to event */ /* Append scan response data to event */
memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); skb_put_data(skb, scan_rsp, scan_rsp_len);
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); mgmt_event_skb(skb, NULL);
} }
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len) u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{ {
struct sk_buff *skb;
struct mgmt_ev_device_found *ev; struct mgmt_ev_device_found *ev;
char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
u16 eir_len; u16 eir_len;
u32 flags;
ev = (struct mgmt_ev_device_found *) buf; if (name_len)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
memset(buf, 0, sizeof(buf)); else
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, bdaddr); bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi; ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, if (name) {
name_len); eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
flags = 0;
skb_put(skb, eir_len);
} else {
eir_len = 0;
flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
}
ev->eir_len = cpu_to_le16(eir_len); ev->eir_len = cpu_to_le16(eir_len);
ev->flags = cpu_to_le32(flags);
mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL); mgmt_event_skb(skb, NULL);
} }
void mgmt_discovering(struct hci_dev *hdev, u8 discovering) void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
......
...@@ -56,40 +56,72 @@ static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie, ...@@ -56,40 +56,72 @@ static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
return skb; return skb;
} }
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
void *data, u16 data_len, int flag, struct sock *skip_sk) unsigned int size)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct mgmt_hdr *hdr;
skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL);
if (!skb) if (!skb)
return -ENOMEM; return skb;
hdr = skb_put(skb, sizeof(*hdr)); skb_reserve(skb, sizeof(struct mgmt_hdr));
hdr->opcode = cpu_to_le16(event); bt_cb(skb)->mgmt.hdev = hdev;
if (hdev) bt_cb(skb)->mgmt.opcode = opcode;
hdr->index = cpu_to_le16(hdev->id);
else
hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
hdr->len = cpu_to_le16(data_len);
if (data) return skb;
skb_put_data(skb, data, data_len); }
int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
struct sock *skip_sk)
{
struct hci_dev *hdev;
struct mgmt_hdr *hdr;
int len = skb->len;
if (!skb)
return -EINVAL;
hdev = bt_cb(skb)->mgmt.hdev;
/* Time stamp */ /* Time stamp */
__net_timestamp(skb); __net_timestamp(skb);
hci_send_to_channel(channel, skb, flag, skip_sk); /* Send just the data, without headers, to the monitor */
if (channel == HCI_CHANNEL_CONTROL) if (channel == HCI_CHANNEL_CONTROL)
hci_send_monitor_ctrl_event(hdev, event, data, data_len, hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
skb->data, skb->len,
skb_get_ktime(skb), flag, skip_sk); skb_get_ktime(skb), flag, skip_sk);
hdr = skb_push(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
if (hdev)
hdr->index = cpu_to_le16(hdev->id);
else
hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
hdr->len = cpu_to_le16(len);
hci_send_to_channel(channel, skb, flag, skip_sk);
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
} }
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
void *data, u16 data_len, int flag, struct sock *skip_sk)
{
struct sk_buff *skb;
skb = mgmt_alloc_skb(hdev, event, data_len);
if (!skb)
return -ENOMEM;
if (data)
skb_put_data(skb, data, data_len);
return mgmt_send_event_skb(channel, skb, flag, skip_sk);
}
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{ {
struct sk_buff *skb, *mskb; struct sk_buff *skb, *mskb;
......
...@@ -32,6 +32,10 @@ struct mgmt_pending_cmd { ...@@ -32,6 +32,10 @@ struct mgmt_pending_cmd {
int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
}; };
struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
unsigned int size);
int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
struct sock *skip_sk);
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
void *data, u16 data_len, int flag, struct sock *skip_sk); void *data, u16 data_len, int flag, struct sock *skip_sk);
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status); int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
......
...@@ -590,7 +590,7 @@ void msft_unregister(struct hci_dev *hdev) ...@@ -590,7 +590,7 @@ void msft_unregister(struct hci_dev *hdev)
kfree(msft); kfree(msft);
} }
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{ {
struct msft_data *msft = hdev->msft_data; struct msft_data *msft = hdev->msft_data;
u8 event; u8 event;
......
...@@ -17,7 +17,7 @@ void msft_register(struct hci_dev *hdev); ...@@ -17,7 +17,7 @@ void msft_register(struct hci_dev *hdev);
void msft_unregister(struct hci_dev *hdev); void msft_unregister(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev); __u64 msft_get_features(struct hci_dev *hdev);
int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
...@@ -39,7 +39,8 @@ static inline void msft_register(struct hci_dev *hdev) {} ...@@ -39,7 +39,8 @@ static inline void msft_register(struct hci_dev *hdev) {}
static inline void msft_unregister(struct hci_dev *hdev) {} static inline void msft_unregister(struct hci_dev *hdev) {}
static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
static inline int msft_add_monitor_pattern(struct hci_dev *hdev, static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor) struct adv_monitor *monitor)
......
...@@ -2023,6 +2023,30 @@ void *skb_pull(struct sk_buff *skb, unsigned int len) ...@@ -2023,6 +2023,30 @@ void *skb_pull(struct sk_buff *skb, unsigned int len)
} }
EXPORT_SYMBOL(skb_pull); EXPORT_SYMBOL(skb_pull);
/**
* skb_pull_data - remove data from the start of a buffer returning its
* original position.
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the original data in the buffer
* is returned after checking if there is enough data to pull. Once the
* data has been pulled future pushes will overwrite the old data.
*/
void *skb_pull_data(struct sk_buff *skb, size_t len)
{
void *data = skb->data;
if (skb->len < len)
return NULL;
skb_pull(skb, len);
return data;
}
EXPORT_SYMBOL(skb_pull_data);
/** /**
* skb_trim - remove end from a buffer * skb_trim - remove end from a buffer
* @skb: buffer to alter * @skb: buffer to alter
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册