提交 90bc7dfd 编写于 作者: K Kalle Valo

Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.14. Major changes:

ath10k

* initial USB bus support (no full support yet)

* add tdls support for 10.4 firmware

ath9k

* add Dell Wireless 1802

wil6210

* support FW RSSI reporting
......@@ -29,6 +29,13 @@ config ATH10K_SDIO
This module adds experimental support for SDIO/MMC bus. Currently
work in progress and will not fully work.
config ATH10K_USB
tristate "Atheros ath10k USB support (EXPERIMENTAL)"
depends on ATH10K && USB
---help---
This module adds experimental support for USB bus. Currently
work in progress and will not fully work.
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
......
......@@ -30,5 +30,8 @@ ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o
ath10k_sdio-y += sdio.o
obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
......@@ -197,35 +197,40 @@ static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
dev = &ar_ahb->pdev->dev;
ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold");
ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev,
"wifi_core_cold");
if (IS_ERR(ar_ahb->core_cold_rst)) {
ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->core_cold_rst));
return PTR_ERR(ar_ahb->core_cold_rst);
}
ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold");
ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_cold");
if (IS_ERR(ar_ahb->radio_cold_rst)) {
ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_cold_rst));
return PTR_ERR(ar_ahb->radio_cold_rst);
}
ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm");
ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_warm");
if (IS_ERR(ar_ahb->radio_warm_rst)) {
ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_warm_rst));
return PTR_ERR(ar_ahb->radio_warm_rst);
}
ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif");
ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_srif");
if (IS_ERR(ar_ahb->radio_srif_rst)) {
ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_srif_rst));
return PTR_ERR(ar_ahb->radio_srif_rst);
}
ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init");
ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev,
"wifi_cpu_init");
if (IS_ERR(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
PTR_ERR(ar_ahb->cpu_init_rst));
......
......@@ -1454,6 +1454,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
{
switch (ar->hif.bus) {
case ATH10K_BUS_SDIO:
case ATH10K_BUS_USB:
scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin",
ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus),
fw_api);
......@@ -1885,6 +1886,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
WMI_10_4_STAT_PEER_EXTD;
ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS;
if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
fw_file->fw_features))
......@@ -2123,6 +2125,14 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw->fw_file.fw_features))
val |= WMI_10_4_COEX_GPIO_SUPPORT;
if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
ar->wmi.svc_map))
val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY;
if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
ar->wmi.svc_map))
val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA;
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
......
......@@ -92,6 +92,7 @@ enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
......@@ -103,6 +104,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
return "ahb";
case ATH10K_BUS_SDIO:
return "sdio";
case ATH10K_BUS_USB:
return "usb";
}
return "unknown";
......@@ -993,6 +996,8 @@ struct ath10k {
u32 reg_ack_cts_timeout_orig;
} fw_coverage;
u32 ampdu_reference;
void *ce_priv;
/* must be last */
......
......@@ -40,6 +40,8 @@ enum ath10k_debug_mask {
ATH10K_DBG_AHB = 0x00008000,
ATH10K_DBG_SDIO = 0x00010000,
ATH10K_DBG_SDIO_DUMP = 0x00020000,
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_ANY = 0xffffffff,
};
......
......@@ -890,16 +890,26 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->nss = 0;
status->encoding = RX_ENC_LEGACY;
status->bw = RATE_INFO_BW_20;
status->flag &= ~RX_FLAG_MACTIME_END;
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
status->ampdu_reference = ar->ampdu_reference;
ath10k_htt_rx_h_signal(ar, status, rxd);
ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
if (is_last_ppdu)
if (is_last_ppdu) {
ath10k_htt_rx_h_mactime(ar, status, rxd);
/* set ampdu last segment flag */
status->flag |= RX_FLAG_AMPDU_IS_LAST;
ar->ampdu_reference++;
}
}
static const char * const tid_to_ac[] = {
......
......@@ -720,6 +720,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_10_4_IPHDR_PAD_CONFIG 1
#define TARGET_10_4_QWRAP_CONFIG 0
/* TDLS config */
#define TARGET_10_4_NUM_TDLS_VDEVS 1
#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1
#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 12
......
......@@ -8197,8 +8197,11 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
}
if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
}
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
......
......@@ -683,7 +683,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
lookaheads[0] = msg_lookahead;
timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
while (time_before(jiffies, timeout)) {
do {
/* Try to allocate as many HTC RX packets indicated by
* n_lookaheads.
*/
......@@ -719,7 +719,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
* performance in high throughput situations.
*/
*done = false;
}
} while (time_before(jiffies, timeout));
if (ret && (ret != -ECANCELED))
ath10k_warn(ar, "failed to get pending recv messages: %d\n",
......@@ -1336,11 +1336,11 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
sdio_release_host(ar_sdio->func);
timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
while (time_before(jiffies, timeout) && !done) {
do {
ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
if (ret)
break;
}
} while (time_before(jiffies, timeout) && !done);
sdio_claim_host(ar_sdio->func);
......
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/usb.h>
#include "debug.h"
#include "core.h"
#include "bmi.h"
#include "hif.h"
#include "htc.h"
#include "usb.h"
static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
struct ath10k_usb_pipe *recv_pipe);
/* inlined helper functions */
static inline enum ath10k_htc_ep_id
eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr)
{
return (enum ath10k_htc_ep_id)htc_hdr->eid;
}
static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr)
{
return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len;
}
/* pipe/urb operations */
/* Pop a free urb context from the pipe's free list, or NULL if exhausted. */
static struct ath10k_urb_context *
ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
{
	struct ath10k_urb_context *ctx;
	unsigned long flags;

	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);

	ctx = list_first_entry_or_null(&pipe->urb_list_head,
				       struct ath10k_urb_context, link);
	if (ctx) {
		list_del(&ctx->link);
		pipe->urb_cnt--;
	}

	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);

	return ctx;
}
/* Return a urb context to its pipe's free list. */
static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
					struct ath10k_urb_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);

	pipe->urb_cnt++;
	list_add(&ctx->link, &pipe->urb_list_head);

	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
}
/* Drop a receive context's skb (if any) and recycle the context. */
static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context)
{
	struct sk_buff *skb = urb_context->skb;

	urb_context->skb = NULL;
	dev_kfree_skb(skb);

	ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
}
static void ath10k_usb_free_pipe_resources(struct ath10k *ar,
struct ath10k_usb_pipe *pipe)
{
struct ath10k_urb_context *urb_context;
if (!pipe->ar_usb) {
/* nothing allocated for this pipe */
return;
}
ath10k_dbg(ar, ATH10K_DBG_USB,
"usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n",
pipe->logical_pipe_num, pipe->usb_pipe_handle,
pipe->urb_alloc, pipe->urb_cnt);
if (pipe->urb_alloc != pipe->urb_cnt) {
ath10k_dbg(ar, ATH10K_DBG_USB,
"usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n",
pipe->logical_pipe_num, pipe->usb_pipe_handle,
pipe->urb_alloc, pipe->urb_cnt);
}
for (;;) {
urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
if (!urb_context)
break;
kfree(urb_context);
}
}
/* Release urb contexts for all pipes. */
static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	unsigned int i;

	for (i = 0; i < ATH10K_USB_PIPE_MAX; i++)
		ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]);
}
/* hif usb rx/tx completion functions */
/* Completion callback for bulk-in urbs.
 *
 * On success the filled skb is detached from the urb context and queued
 * for process-context handling; the context is then recycled and, if
 * enough free contexts have piled up, the pipe is re-armed. On error the
 * skb is freed with the context.
 */
static void ath10k_usb_recv_complete(struct urb *urb)
{
	struct ath10k_urb_context *urb_context = urb->context;
	struct ath10k_usb_pipe *pipe = urb_context->pipe;
	struct ath10k *ar = pipe->ar_usb->ar;
	struct sk_buff *skb;
	int status = 0;

	ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
		   "usb recv pipe %d stat %d len %d urb 0x%pK\n",
		   pipe->logical_pipe_num, urb->status, urb->actual_length,
		   urb);

	if (urb->status != 0) {
		status = -EIO;
		switch (urb->status) {
		case -ECONNRESET:
		case -ENOENT:
		case -ESHUTDOWN:
			/* no need to spew these errors when device
			 * removed or urb killed due to driver shutdown
			 */
			status = -ECANCELED;
			break;
		default:
			ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
				   "usb recv pipe %d ep 0x%2.2x failed: %d\n",
				   pipe->logical_pipe_num,
				   pipe->ep_address, urb->status);
			break;
		}
		goto cleanup_recv_urb;
	}

	/* zero-length transfers carry no data; just recycle the context */
	if (urb->actual_length == 0)
		goto cleanup_recv_urb;

	skb = urb_context->skb;

	/* we are going to pass it up */
	urb_context->skb = NULL;
	skb_put(skb, urb->actual_length);

	/* note: queue implements a lock */
	skb_queue_tail(&pipe->io_comp_queue, skb);
	schedule_work(&pipe->io_complete_work);

cleanup_recv_urb:
	ath10k_usb_cleanup_recv_urb(urb_context);

	/* -ECANCELED means shutdown/unplug: do not repost in that case */
	if (status == 0 &&
	    pipe->urb_cnt >= pipe->urb_cnt_thresh) {
		/* our free urbs are piling up, post more transfers */
		ath10k_usb_post_recv_transfers(ar, pipe);
	}
}
/* Completion callback for bulk-out urbs: recycle the urb context and queue
 * the skb for TX-completion handling in process context.
 */
static void ath10k_usb_transmit_complete(struct urb *urb)
{
	struct ath10k_urb_context *ctx = urb->context;
	struct ath10k_usb_pipe *pipe = ctx->pipe;
	struct ath10k *ar = pipe->ar_usb->ar;
	struct sk_buff *skb = ctx->skb;

	if (urb->status != 0)
		ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
			   "pipe: %d, failed:%d\n",
			   pipe->logical_pipe_num, urb->status);

	ctx->skb = NULL;
	ath10k_usb_free_urb_to_pipe(ctx->pipe, ctx);

	/* note: queue implements a lock */
	skb_queue_tail(&pipe->io_comp_queue, skb);
	schedule_work(&pipe->io_complete_work);
}
/* pipe operations */
/* Refill a receive pipe: keep submitting bulk-in urbs until the pipe's
 * free-context list is exhausted. Runs in atomic context (GFP_ATOMIC).
 */
static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
					   struct ath10k_usb_pipe *recv_pipe)
{
	struct ath10k_urb_context *urb_context;
	struct urb *urb;
	int usb_status;

	for (;;) {
		urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe);
		if (!urb_context)
			break; /* all contexts are in flight - done */

		urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE);
		if (!urb_context->skb)
			goto err;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			goto err;

		usb_fill_bulk_urb(urb,
				  recv_pipe->ar_usb->udev,
				  recv_pipe->usb_pipe_handle,
				  urb_context->skb->data,
				  ATH10K_USB_RX_BUFFER_SIZE,
				  ath10k_usb_recv_complete, urb_context);

		ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
			   "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n",
			   recv_pipe->logical_pipe_num,
			   recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
			   ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);

		usb_anchor_urb(urb, &recv_pipe->urb_submitted);
		usb_status = usb_submit_urb(urb, GFP_ATOMIC);

		if (usb_status) {
			ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
				   "usb bulk recv failed: %d\n",
				   usb_status);
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			goto err;
		}
		/* the anchor holds its own reference; drop ours */
		usb_free_urb(urb);
	}

	return;

err:
	/* NOTE(review): only the context of the failing iteration is cleaned
	 * up here; urbs submitted by earlier iterations stay anchored and
	 * complete normally.
	 */
	ath10k_usb_cleanup_recv_urb(urb_context);
}
static void ath10k_usb_flush_all(struct ath10k *ar)
{
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
int i;
for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
if (ar_usb->pipes[i].ar_usb) {
usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
cancel_work_sync(&ar_usb->pipes[i].io_complete_work);
}
}
}
static void ath10k_usb_start_recv_pipes(struct ath10k *ar)
{
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
ath10k_usb_post_recv_transfers(ar,
&ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);
}
/* Hand a completed TX skb back to its HTC endpoint. */
static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	struct ath10k_htc_ep *ep = &ar->htc.endpoint[htc_hdr->eid];

	/* The TX complete handler takes ownership of the skb from here on. */
	ath10k_htc_notify_tx_completion(ep, skb);
}
/* Dispatch one received HTC message: validate the header, process any
 * trailer, strip header/trailer and pass the payload to the endpoint's
 * rx handler. Frees the skb on any validation failure.
 */
static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *htc_hdr;
	enum ath10k_htc_ep_id eid;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u8 *trailer;
	int ret;

	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	eid = eid_from_htc_hdr(htc_hdr);
	ep = &ar->htc.endpoint[eid];

	/* service_id == 0 means the endpoint was never connected */
	if (ep->service_id == 0) {
		ath10k_warn(ar, "ep %d is not connected\n", eid);
		goto out_free_skb;
	}

	payload_len = le16_to_cpu(htc_hdr->len);
	if (!payload_len) {
		ath10k_warn(ar, "zero length frame received, firmware crashed?\n");
		goto out_free_skb;
	}

	/* the trailer must fit inside the payload */
	if (payload_len < htc_hdr->trailer_len) {
		ath10k_warn(ar, "malformed frame received, firmware crashed?\n");
		goto out_free_skb;
	}

	if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) {
		/* trailer sits at the end of the payload */
		trailer = skb->data + sizeof(*htc_hdr) + payload_len -
			  htc_hdr->trailer_len;

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 NULL,
						 NULL);
		if (ret)
			goto out_free_skb;

		if (is_trailer_only_msg(htc_hdr))
			goto out_free_skb;

		/* strip off the trailer from the skb since it should not
		 * be passed on to upper layers
		 */
		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	skb_pull(skb, sizeof(*htc_hdr));
	ep->ep_ops.ep_rx_complete(ar, skb);
	/* The RX complete handler now owns the skb... */
	return;

out_free_skb:
	dev_kfree_skb(skb);
}
static void ath10k_usb_io_comp_work(struct work_struct *work)
{
struct ath10k_usb_pipe *pipe = container_of(work,
struct ath10k_usb_pipe,
io_complete_work);
struct ath10k *ar = pipe->ar_usb->ar;
struct sk_buff *skb;
while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX)
ath10k_usb_tx_complete(ar, skb);
else
ath10k_usb_rx_complete(ar, skb);
}
}
#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write))
#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read))
/* Tear down all USB state: kill in-flight urbs, free pipe resources,
 * clear the interface's driver data and free the diag buffers. Safe on a
 * partially initialized ar_usb (kfree(NULL) is a no-op).
 */
static void ath10k_usb_destroy(struct ath10k *ar)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);

	ath10k_usb_flush_all(ar);
	ath10k_usb_cleanup_pipe_resources(ar);
	usb_set_intfdata(ar_usb->interface, NULL);
	kfree(ar_usb->diag_cmd_buffer);
	kfree(ar_usb->diag_resp_buffer);
}
/* HIF start: arm the RX pipes and set TX flow-control thresholds. */
static int ath10k_usb_hif_start(struct ath10k *ar)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	int i;

	ath10k_usb_start_recv_pipes(ar);

	/* set the TX resource avail threshold for each TX pipe */
	for (i = ATH10K_USB_PIPE_TX_CTRL; i <= ATH10K_USB_PIPE_TX_DATA_HP; i++)
		ar_usb->pipes[i].urb_cnt_thresh =
			ar_usb->pipes[i].urb_alloc / 2;

	return 0;
}
/* HIF scatter-gather transmit: submit one bulk-out urb per item.
 *
 * On success the urbs (and their skbs) are owned by the USB core until
 * ath10k_usb_transmit_complete() runs. On failure the urb context of the
 * failing item is recycled and the error is returned; skb ownership for
 * unsent items stays with the caller.
 */
static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id];
	struct ath10k_urb_context *urb_context;
	struct sk_buff *skb;
	struct urb *urb;
	int ret, i;

	for (i = 0; i < n_items; i++) {
		urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
		if (!urb_context) {
			ret = -ENOMEM;
			goto err;
		}

		skb = items[i].transfer_context;
		urb_context->skb = skb;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			ret = -ENOMEM;
			goto err_free_urb_to_pipe;
		}

		usb_fill_bulk_urb(urb,
				  ar_usb->udev,
				  pipe->usb_pipe_handle,
				  skb->data,
				  skb->len,
				  ath10k_usb_transmit_complete, urb_context);

		if (!(skb->len % pipe->max_packet_size)) {
			/* hit a max packet boundary on this pipe */
			urb->transfer_flags |= URB_ZERO_PACKET;
		}

		usb_anchor_urb(urb, &pipe->urb_submitted);
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret) {
			ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
				   "usb bulk transmit failed: %d\n", ret);
			usb_unanchor_urb(urb);
			/* BUGFIX: drop our urb reference on the failure path
			 * too, otherwise the urb is leaked (the success path
			 * below already does this).
			 */
			usb_free_urb(urb);
			ret = -EINVAL;
			goto err_free_urb_to_pipe;
		}

		/* the anchor holds its own reference; drop ours */
		usb_free_urb(urb);
	}

	return 0;

err_free_urb_to_pipe:
	ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
err:
	return ret;
}
/* HIF stop: cancel all outstanding USB transfers and completion work. */
static void ath10k_usb_hif_stop(struct ath10k *ar)
{
	ath10k_usb_flush_all(ar);
}
/* Report the number of free urb contexts on a pipe; the HTC layer uses
 * this as its TX credit count.
 */
static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);

	return ar_usb->pipes[pipe_id].urb_cnt;
}
/* Send a vendor control-out message. The payload is copied to a kernel
 * buffer because usb_control_msg() requires DMA-able memory.
 */
static int ath10k_usb_submit_ctrl_out(struct ath10k *ar,
				      u8 req, u16 value, u16 index, void *data,
				      u32 size)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	u8 *buf = NULL;
	int ret;

	if (size > 0) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	/* note: if successful returns number of bytes transferred */
	ret = usb_control_msg(ar_usb->udev,
			      usb_sndctrlpipe(ar_usb->udev, 0),
			      req,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_DEVICE, value, index, buf,
			      size, 1000);
	if (ret < 0)
		ath10k_warn(ar, "Failed to submit usb control message: %d\n",
			    ret);
	else
		ret = 0;

	kfree(buf);
	return ret;
}
/* Receive a vendor control-in message into a bounce buffer and copy the
 * result to @data (usb_control_msg() requires DMA-able memory).
 */
static int ath10k_usb_submit_ctrl_in(struct ath10k *ar,
				     u8 req, u16 value, u16 index, void *data,
				     u32 size)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	u8 *buf = NULL;
	int ret;

	if (size > 0) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	/* note: if successful returns number of bytes transferred.
	 * BUGFIX: the timeout argument of usb_control_msg() is in
	 * milliseconds; the previous "2 * HZ" was jiffies-based and thus
	 * HZ-dependent (e.g. only 250 ms with HZ=250). Use 2000 ms.
	 */
	ret = usb_control_msg(ar_usb->udev,
			      usb_rcvctrlpipe(ar_usb->udev, 0),
			      req,
			      USB_DIR_IN | USB_TYPE_VENDOR |
			      USB_RECIP_DEVICE, value, index, buf,
			      size, 2000);

	if (ret < 0) {
		ath10k_warn(ar, "Failed to read usb control message: %d\n",
			    ret);
		kfree(buf);
		return ret;
	}

	memcpy((u8 *)data, buf, size);

	kfree(buf);

	return 0;
}
/* Send a control-out command and, when @resp_buf is given, read back the
 * control-in response of *resp_len bytes.
 */
static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar,
					u8 req_val, u8 *req_buf, u32 req_len,
					u8 resp_val, u8 *resp_buf,
					u32 *resp_len)
{
	int ret;

	/* send command */
	ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0,
					 req_buf, req_len);
	if (ret)
		return ret;

	/* get response, if requested */
	if (!resp_buf)
		return 0;

	return ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0,
					 resp_buf, *resp_len);
}
/* Read target memory at @address via the vendor diag command/response
 * exchange; @buf receives one struct ath10k_usb_ctrl_diag_resp_read.
 */
static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	struct ath10k_usb_ctrl_diag_cmd_read *cmd;
	u32 resp_len;
	int ret;

	if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read))
		return -EINVAL;

	cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer;
	memset(cmd, 0, sizeof(*cmd));
	/* NOTE(review): the write path wraps cmd->cmd in cpu_to_le32() but
	 * this read path assigns it raw - verify the field's endianness
	 * contract against the usb.h definition.
	 */
	cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ;
	cmd->address = cpu_to_le32(address);
	resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read);

	ret = ath10k_usb_ctrl_msg_exchange(ar,
					   ATH10K_USB_CONTROL_REQ_DIAG_CMD,
					   (u8 *)cmd,
					   sizeof(*cmd),
					   ATH10K_USB_CONTROL_REQ_DIAG_RESP,
					   ar_usb->diag_resp_buffer, &resp_len);

	if (ret)
		return ret;

	if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read))
		return -EMSGSIZE;

	memcpy(buf, ar_usb->diag_resp_buffer,
	       sizeof(struct ath10k_usb_ctrl_diag_resp_read));

	return 0;
}
/* Write one 32-bit value to target memory at @address via the vendor
 * diag command endpoint. @nbytes must equal sizeof(cmd->value).
 */
static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	struct ath10k_usb_ctrl_diag_cmd_write *cmd;
	int ret;

	/* only single-word writes are supported by this command */
	if (nbytes != sizeof(cmd->value))
		return -EINVAL;

	cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer;
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE);
	cmd->address = cpu_to_le32(address);
	memcpy(&cmd->value, data, nbytes);

	/* no response expected for a write (resp_buf == NULL) */
	ret = ath10k_usb_ctrl_msg_exchange(ar,
					   ATH10K_USB_CONTROL_REQ_DIAG_CMD,
					   (u8 *)cmd,
					   sizeof(*cmd),
					   0, NULL, NULL);
	if (ret)
		return ret;

	return 0;
}
/* Exchange a BMI message: optionally send @req, then optionally read
 * *resp_len bytes of response into @resp.
 */
static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar,
				       void *req, u32 req_len,
				       void *resp, u32 *resp_len)
{
	int ret;

	if (req) {
		ret = ath10k_usb_submit_ctrl_out(ar,
						 ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD,
						 0, 0, req, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp)
		return 0;

	ret = ath10k_usb_submit_ctrl_in(ar,
					ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP,
					0, 0, resp, *resp_len);
	if (ret)
		ath10k_warn(ar,
			    "Unable to read the bmi data from the device: %d\n",
			    ret);

	return ret;
}
/* Default control pipes used before any service is mapped. */
static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	*ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
	*dl_pipe = ATH10K_USB_PIPE_RX_CTRL;
}
/* Map an HTC service id to its uplink/downlink USB pipes.
 * Returns -EPERM for services not supported over USB.
 */
static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
					      u8 *ul_pipe, u8 *dl_pipe)
{
	int ret = 0;

	switch (svc_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
		/* due to large control packets, shift to data pipe */
		*dl_pipe = ATH10K_USB_PIPE_RX_DATA;
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		*ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP;
		/* Disable rxdata2 directly, it will be enabled
		 * if FW enable rxdata2
		 */
		*dl_pipe = ATH10K_USB_PIPE_RX_DATA;
		break;
	default:
		ret = -EPERM;
		break;
	}

	return ret;
}
/* This op is currently only used by htc_wait_target if the HTC ready
* message times out. It is not applicable for USB since there is nothing
* we can do if the HTC ready message does not arrive in time.
* TODO: Make this op non mandatory by introducing a NULL check in the
* hif op wrapper.
*/
static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
					       u8 pipe, int force)
{
	/* intentionally a no-op for USB; see the comment above */
}
/* No-op for USB: always reports success. */
static int ath10k_usb_hif_power_up(struct ath10k *ar)
{
	return 0;
}
/* Power down: just cancel all outstanding transfers. */
static void ath10k_usb_hif_power_down(struct ath10k *ar)
{
	ath10k_usb_flush_all(ar);
}
#ifdef CONFIG_PM

/* HIF-level suspend/resume are not supported over USB; power management
 * is handled by the USB driver's pm callbacks instead.
 */
static int ath10k_usb_hif_suspend(struct ath10k *ar)
{
	return -EOPNOTSUPP;
}

static int ath10k_usb_hif_resume(struct ath10k *ar)
{
	return -EOPNOTSUPP;
}
#endif
/* HIF callbacks the ath10k core uses to drive the USB bus. */
static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
	.tx_sg			= ath10k_usb_hif_tx_sg,
	.diag_read		= ath10k_usb_hif_diag_read,
	.diag_write		= ath10k_usb_hif_diag_write,
	.exchange_bmi_msg	= ath10k_usb_bmi_exchange_msg,
	.start			= ath10k_usb_hif_start,
	.stop			= ath10k_usb_hif_stop,
	.map_service_to_pipe	= ath10k_usb_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_usb_hif_get_default_pipe,
	.send_complete_check	= ath10k_usb_hif_send_complete_check,
	.get_free_queue_number	= ath10k_usb_hif_get_free_queue_number,
	.power_up		= ath10k_usb_hif_power_up,
	.power_down		= ath10k_usb_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_usb_hif_suspend,
	.resume			= ath10k_usb_hif_resume,
#endif
};
/* Translate a USB endpoint address to the driver's logical pipe number
 * and report how many urb contexts that pipe should get. Returns
 * ATH10K_USB_PIPE_INVALID (and leaves *urb_count untouched) for
 * endpoints the driver does not use.
 */
static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count)
{
	u8 pipe_num;

	switch (ep_address) {
	case ATH10K_USB_EP_ADDR_APP_CTRL_IN:
		pipe_num = ATH10K_USB_PIPE_RX_CTRL;
		*urb_count = RX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_DATA_IN:
		pipe_num = ATH10K_USB_PIPE_RX_DATA;
		*urb_count = RX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_INT_IN:
		pipe_num = ATH10K_USB_PIPE_RX_INT;
		*urb_count = RX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_DATA2_IN:
		pipe_num = ATH10K_USB_PIPE_RX_DATA2;
		*urb_count = RX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_CTRL_OUT:
		pipe_num = ATH10K_USB_PIPE_TX_CTRL;
		*urb_count = TX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT:
		pipe_num = ATH10K_USB_PIPE_TX_DATA_LP;
		*urb_count = TX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT:
		pipe_num = ATH10K_USB_PIPE_TX_DATA_MP;
		*urb_count = TX_URB_COUNT;
		break;
	case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT:
		pipe_num = ATH10K_USB_PIPE_TX_DATA_HP;
		*urb_count = TX_URB_COUNT;
		break;
	default:
		/* note: there may be endpoints not currently used */
		pipe_num = ATH10K_USB_PIPE_INVALID;
		break;
	}

	return pipe_num;
}
/* Allocate @urb_cnt urb contexts for a pipe and place them on its free
 * list. On partial failure the contexts already allocated stay on the
 * list and are released later by ath10k_usb_free_pipe_resources().
 */
static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar,
					   struct ath10k_usb_pipe *pipe,
					   int urb_cnt)
{
	struct ath10k_urb_context *urb_context;
	int i;

	INIT_LIST_HEAD(&pipe->urb_list_head);
	init_usb_anchor(&pipe->urb_submitted);

	for (i = 0; i < urb_cnt; i++) {
		urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL);
		if (!urb_context)
			return -ENOMEM;

		urb_context->pipe = pipe;

		/* we are only allocate the urb contexts here, the actual URB
		 * is allocated from the kernel as needed to do a transaction
		 */
		pipe->urb_alloc++;
		ath10k_usb_free_urb_to_pipe(pipe, urb_context);
	}

	ath10k_dbg(ar, ATH10K_DBG_USB,
		   "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n",
		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
		   pipe->urb_alloc);

	return 0;
}
/* Walk the interface's endpoint descriptors, bind each known endpoint
 * address to its logical pipe, build the matching USB pipe handle and
 * allocate the pipe's urb contexts.
 */
static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
					   struct usb_interface *interface)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	struct usb_host_interface *iface_desc = interface->cur_altsetting;
	struct usb_endpoint_descriptor *endpoint;
	struct ath10k_usb_pipe *pipe;
	int ret, i, urbcount;
	u8 pipe_num;

	ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n");

	/* walk descriptors and setup pipes */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		/* log each endpoint by transfer type */
		if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
			ath10k_dbg(ar, ATH10K_DBG_USB,
				   "usb %s bulk ep 0x%2.2x maxpktsz %d\n",
				   ATH10K_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "rx" : "tx", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize));
		} else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
			ath10k_dbg(ar, ATH10K_DBG_USB,
				   "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n",
				   ATH10K_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "rx" : "tx", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize),
				   endpoint->bInterval);
		} else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
			/* TODO for ISO */
			ath10k_dbg(ar, ATH10K_DBG_USB,
				   "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n",
				   ATH10K_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "rx" : "tx", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize),
				   endpoint->bInterval);
		}
		urbcount = 0;

		pipe_num =
		    ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress,
						    &urbcount);
		if (pipe_num == ATH10K_USB_PIPE_INVALID)
			continue; /* endpoint not used by this driver */

		pipe = &ar_usb->pipes[pipe_num];
		if (pipe->ar_usb)
			/* hmmm..pipe was already setup */
			continue;

		pipe->ar_usb = ar_usb;
		pipe->logical_pipe_num = pipe_num;
		pipe->ep_address = endpoint->bEndpointAddress;
		pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);

		/* build the kernel pipe handle matching type and direction */
		if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
			if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvbulkpipe(ar_usb->udev,
						    pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndbulkpipe(ar_usb->udev,
						    pipe->ep_address);
			}
		} else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
			if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvintpipe(ar_usb->udev,
						   pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndintpipe(ar_usb->udev,
						   pipe->ep_address);
			}
		} else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
			/* TODO for ISO */
			if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvisocpipe(ar_usb->udev,
						    pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndisocpipe(ar_usb->udev,
						    pipe->ep_address);
			}
		}

		pipe->ep_desc = endpoint;

		if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address))
			pipe->flags |= ATH10K_USB_PIPE_FLAG_TX;

		ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount);
		if (ret)
			return ret;
	}

	return 0;
}
/* Initialize the USB-private state for @ar: pipes, work items and diag
 * bounce buffers. On failure everything allocated so far is released via
 * ath10k_usb_destroy(), so the caller only needs to drop the core.
 */
static int ath10k_usb_create(struct ath10k *ar,
			     struct usb_interface *interface)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	struct usb_device *dev = interface_to_usbdev(interface);
	struct ath10k_usb_pipe *pipe;
	int ret, i;

	usb_set_intfdata(interface, ar_usb);
	spin_lock_init(&ar_usb->cs_lock);
	ar_usb->udev = dev;
	ar_usb->interface = interface;

	for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
		pipe = &ar_usb->pipes[i];
		INIT_WORK(&pipe->io_complete_work,
			  ath10k_usb_io_comp_work);
		skb_queue_head_init(&pipe->io_comp_queue);
	}

	ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL);
	if (!ar_usb->diag_cmd_buffer) {
		ret = -ENOMEM;
		goto err;
	}

	ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP,
					   GFP_KERNEL);
	if (!ar_usb->diag_resp_buffer) {
		ret = -ENOMEM;
		goto err;
	}

	ret = ath10k_usb_setup_pipe_resources(ar, interface);
	if (ret)
		goto err;

	return 0;

err:
	ath10k_usb_destroy(ar);
	return ret;
}
/* ath10k usb driver registered functions */
/* USB probe: create the ath10k core, set up USB pipe resources and
 * register with the core. Drops everything again on failure.
 */
static int ath10k_usb_probe(struct usb_interface *interface,
			    const struct usb_device_id *id)
{
	struct ath10k *ar;
	struct ath10k_usb *ar_usb;
	struct usb_device *dev = interface_to_usbdev(interface);
	int ret, vendor_id, product_id;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;

	/* Assumption: All USB based chipsets (so far) are QCA9377 based.
	 * If there will be newer chipsets that does not use the hw reg
	 * setup as defined in qca6174_regs and qca6174_values, this
	 * assumption is no longer valid and hw_rev must be setup differently
	 * depending on chipset.
	 */
	hw_rev = ATH10K_HW_QCA9377;

	ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB,
				hw_rev, &ath10k_usb_hif_ops);
	if (!ar) {
		dev_err(&dev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	usb_get_dev(dev);
	vendor_id = le16_to_cpu(dev->descriptor.idVendor);
	product_id = le16_to_cpu(dev->descriptor.idProduct);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "usb new func vendor 0x%04x product 0x%04x\n",
		   vendor_id, product_id);

	ar_usb = ath10k_usb_priv(ar);
	ret = ath10k_usb_create(ar, interface);
	/* BUGFIX: this return value was previously ignored; on failure
	 * ath10k_usb_create() has already cleaned up its own allocations,
	 * so only the core and the device reference remain to drop.
	 */
	if (ret)
		goto err;
	ar_usb->ar = ar;

	ar->dev_id = product_id;
	ar->id.vendor = vendor_id;
	ar->id.device = product_id;

	/* TODO: don't know yet how to get chip_id with USB */
	chip_id = 0;
	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_warn(ar, "failed to register driver core: %d\n", ret);
		/* BUGFIX: release the USB pipe resources allocated by
		 * ath10k_usb_create() on this path as well.
		 */
		goto err_usb_destroy;
	}

	/* TODO: remove this once USB support is fully implemented */
	ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");

	return 0;

err_usb_destroy:
	ath10k_usb_destroy(ar);

err:
	ath10k_core_destroy(ar);

	usb_put_dev(dev);

	return ret;
}
/* USB disconnect: unregister from the core, release USB resources, drop
 * the device reference taken in probe and free the core instance.
 */
static void ath10k_usb_remove(struct usb_interface *interface)
{
	struct ath10k_usb *ar_usb;

	ar_usb = usb_get_intfdata(interface);
	if (!ar_usb)
		return;

	ath10k_core_unregister(ar_usb->ar);
	ath10k_usb_destroy(ar_usb->ar);
	usb_put_dev(interface_to_usbdev(interface));
	ath10k_core_destroy(ar_usb->ar);
}
#ifdef CONFIG_PM

/* USB suspend: kill outstanding urbs so the bus can idle. */
static int ath10k_usb_pm_suspend(struct usb_interface *interface,
				 pm_message_t message)
{
	struct ath10k_usb *ar_usb = usb_get_intfdata(interface);

	ath10k_usb_flush_all(ar_usb->ar);
	return 0;
}

/* USB resume: re-arm the RX data pipe with receive urbs. */
static int ath10k_usb_pm_resume(struct usb_interface *interface)
{
	struct ath10k_usb *ar_usb = usb_get_intfdata(interface);
	struct ath10k *ar = ar_usb->ar;

	ath10k_usb_post_recv_transfers(ar,
				       &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);

	return 0;
}

#else

#define ath10k_usb_pm_suspend NULL
#define ath10k_usb_pm_resume NULL

#endif
/* table of devices that work with this driver
 *
 * Declared const: the USB core never writes to the id table, and kernel
 * convention (checkpatch) is to constify device-id tables so they land in
 * read-only data.
 */
static const struct usb_device_id ath10k_usb_ids[] = {
	{USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */
	{ /* Terminating entry */ },
};

MODULE_DEVICE_TABLE(usb, ath10k_usb_ids);
/* USB driver registration. When CONFIG_PM is disabled the suspend/resume
 * callbacks are the NULL macros defined above.
 */
static struct usb_driver ath10k_usb_driver = {
	.name = "ath10k_usb",
	.probe = ath10k_usb_probe,
	.suspend = ath10k_usb_pm_suspend,
	.resume = ath10k_usb_pm_resume,
	.disconnect = ath10k_usb_remove,
	.id_table = ath10k_usb_ids,
	.supports_autosuspend = true,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(ath10k_usb_driver);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices");
MODULE_LICENSE("Dual BSD/GPL");
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _USB_H_
#define _USB_H_

/* constants */
#define TX_URB_COUNT 32
#define RX_URB_COUNT 32
#define ATH10K_USB_RX_BUFFER_SIZE 4096

/* sentinel pipe id meaning "endpoint not mapped to any logical pipe" */
#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX

/* USB endpoint definitions (0x80 bit set = IN endpoint) */
#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81
#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82
#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83
#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84
#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01
#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04

/* diagnostic command definitions (vendor control requests) */
#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4

#define ATH10K_USB_CTRL_DIAG_CC_READ 0
#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1

/* classify an endpoint by the transfer-type bits of bmAttributes and
 * direction bit of bEndpointAddress
 */
#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80)

/* payload of a DIAG write control request (all fields little-endian) */
struct ath10k_usb_ctrl_diag_cmd_write {
	__le32 cmd;
	__le32 address;
	__le32 value;
	__le32 padding;
} __packed;

/* payload of a DIAG read control request */
struct ath10k_usb_ctrl_diag_cmd_read {
	__le32 cmd;
	__le32 address;
} __packed;

/* response to a DIAG read: 4 raw bytes of register data */
struct ath10k_usb_ctrl_diag_resp_read {
	u8 value[4];
} __packed;

/* tx/rx pipes for usb */
enum ath10k_usb_pipe_id {
	ATH10K_USB_PIPE_TX_CTRL = 0,
	ATH10K_USB_PIPE_TX_DATA_LP,
	ATH10K_USB_PIPE_TX_DATA_MP,
	ATH10K_USB_PIPE_TX_DATA_HP,
	ATH10K_USB_PIPE_RX_CTRL,
	ATH10K_USB_PIPE_RX_DATA,
	ATH10K_USB_PIPE_RX_DATA2,
	ATH10K_USB_PIPE_RX_INT,
	ATH10K_USB_PIPE_MAX
};

/* per-pipe state: a free-list of urb contexts plus bookkeeping for the
 * endpoint this logical pipe is bound to
 */
struct ath10k_usb_pipe {
	struct list_head urb_list_head;   /* free urb contexts (cs_lock) */
	struct usb_anchor urb_submitted;  /* in-flight urbs, for flush */
	u32 urb_alloc;                    /* total contexts allocated */
	u32 urb_cnt;                      /* contexts currently free (cs_lock) */
	u32 urb_cnt_thresh;               /* refill threshold for rx pipes */
	unsigned int usb_pipe_handle;     /* value from usb_sndbulkpipe() etc. */
	u32 flags;                        /* ATH10K_USB_PIPE_FLAG_* */
	u8 ep_address;                    /* bEndpointAddress */
	u8 logical_pipe_num;              /* enum ath10k_usb_pipe_id */
	struct ath10k_usb *ar_usb;        /* back-pointer to owner */
	u16 max_packet_size;
	struct work_struct io_complete_work;
	struct sk_buff_head io_comp_queue; /* completed skbs for the worker */
	struct usb_endpoint_descriptor *ep_desc;
};

#define ATH10K_USB_PIPE_FLAG_TX BIT(0)

/* usb device object */
struct ath10k_usb {
	/* protects pipe->urb_list_head and pipe->urb_cnt */
	spinlock_t cs_lock;

	struct usb_device *udev;
	struct usb_interface *interface;
	struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX];
	u8 *diag_cmd_buffer;   /* DMA-able buffer for diag requests */
	u8 *diag_resp_buffer;  /* DMA-able buffer for diag responses */
	struct ath10k *ar;     /* back-pointer to the core instance */
};

/* usb urb object: one entry of a pipe's free-list, pairing an urb
 * submission with the skb that carries its data
 */
struct ath10k_urb_context {
	struct list_head link;
	struct ath10k_usb_pipe *pipe;
	struct sk_buff *skb;
	struct ath10k *ar;
};

/* bus-private area of struct ath10k (allocated by ath10k_core_create()
 * with sizeof(struct ath10k_usb) of driver data)
 */
static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar)
{
	return (struct ath10k_usb *)ar->drv_priv;
}

#endif
......@@ -651,8 +651,6 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
.tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
.tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
......@@ -711,6 +709,33 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.pdev_bss_chan_info_request_cmdid =
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
.atf_ssid_grouping_request_cmdid =
WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
.set_periodic_channel_stats_cfg_cmdid =
WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
};
/* MAIN WMI VDEV param map */
......@@ -6473,6 +6498,7 @@ ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
cmd = (struct wmi_peer_create_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->peer_type = __cpu_to_le32(peer_type);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
......@@ -7803,14 +7829,28 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
{
struct wmi_ext_resource_config_10_4_cmd *cmd;
struct sk_buff *skb;
u32 num_tdls_sleep_sta = 0;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
cmd->host_platform_config = __cpu_to_le32(type);
cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
cmd->wlan_gpio_priority = __cpu_to_le32(-1);
cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
cmd->max_tdls_concurrent_buffer_sta =
__cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi ext resource config host type %d firmware feature bitmap %08x\n",
......@@ -7818,6 +7858,124 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
return skb;
}
/* Build the WMI_10_4_TDLS_SET_STATE_CMDID command that enables/disables
 * TDLS on a vdev and programs the firmware's TDLS tunables.
 *
 * Returns a wmi skb ready to send, or ERR_PTR(-ENOMEM).
 *
 * The numeric thresholds below are fixed driver defaults handed to
 * firmware (intervals in ms, RSSI values in dBm encoded as signed
 * values in __le32 fields).
 */
static struct sk_buff *
ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
					 enum wmi_tdls_state state)
{
	struct wmi_10_4_tdls_set_state_cmd *cmd;
	struct sk_buff *skb;
	u32 options = 0;
	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);
	/* firmware that only supports explicit TDLS is forced to passive
	 * mode regardless of the requested state
	 */
	if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map))
		state = WMI_TDLS_ENABLE_PASSIVE;
	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		options |= WMI_TDLS_BUFFER_STA_EN;
	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->state = __cpu_to_le32(state);
	cmd->notification_interval_ms = __cpu_to_le32(5000);
	cmd->tx_discovery_threshold = __cpu_to_le32(100);
	cmd->tx_teardown_threshold = __cpu_to_le32(5);
	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
	cmd->rssi_delta = __cpu_to_le32(-20);
	cmd->tdls_options = __cpu_to_le32(options);
	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
	cmd->teardown_notification_ms = __cpu_to_le32(10);
	cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
		   state, vdev_id);
	return skb;
}
/* Translate the mac80211 U-APSD AC bitmap and max service period into the
 * WMI peer_qos word: one WMI_TDLS_PEER_QOS_AC_* bit per enabled AC plus
 * the SP value packed into the WMI_TDLS_PEER_SP field.
 */
static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
{
	static const struct {
		u8 uapsd_bit;   /* IEEE80211_WMM_IE_STA_QOSINFO_AC_* */
		u32 qos_flag;   /* corresponding WMI_TDLS_PEER_QOS_AC_* */
	} ac_map[] = {
		{ IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, WMI_TDLS_PEER_QOS_AC_VO },
		{ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI, WMI_TDLS_PEER_QOS_AC_VI },
		{ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK, WMI_TDLS_PEER_QOS_AC_BK },
		{ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE, WMI_TDLS_PEER_QOS_AC_BE },
	};
	u32 peer_qos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ac_map); i++)
		if (uapsd_queues & ac_map[i].uapsd_bit)
			peer_qos |= ac_map[i].qos_flag;

	return peer_qos | SM(sp, WMI_TDLS_PEER_SP);
}
/* Build the WMI_10_4_TDLS_PEER_UPDATE_CMDID command carrying a TDLS
 * peer's state, capabilities and off-channel list.
 *
 * Returns a wmi skb ready to send, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
				     const struct wmi_tdls_peer_update_cmd_arg *arg,
				     const struct wmi_tdls_peer_capab_arg *cap,
				     const struct wmi_channel_arg *chan_arg)
{
	struct wmi_10_4_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capabilities *peer_cap;
	struct wmi_channel *chan;
	struct sk_buff *skb;
	u32 peer_qos;
	int len, chan_len;
	int i;
	/* tdls peer update cmd has place holder for one channel*/
	/* sizeof(*cmd) already covers one wmi_channel via the
	 * peer_chan_list[1] placeholder, so only (n - 1) extra channel
	 * slots are appended here
	 */
	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
	len = sizeof(*cmd) + chan_len * sizeof(*chan);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	/* NOTE(review): only the fixed part of the command is cleared here;
	 * the appended channel slots are presumably zeroed by
	 * ath10k_wmi_alloc_skb() and are fully rewritten below — confirm.
	 */
	memset(skb->data, 0, sizeof(*cmd));
	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);
	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
					       cap->peer_max_sp);
	peer_cap = &cmd->peer_capab;
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
	/* operating classes are single bytes: copy as-is, no endian swap */
	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
	/* serialize each channel arg into the trailing wmi_channel array */
	for (i = 0; i < cap->peer_chan_len; i++) {
		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
	}
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}
static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
......@@ -8197,6 +8355,8 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
/* shared with 10.2 */
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
......
......@@ -184,6 +184,17 @@ enum wmi_service {
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
WMI_SERVICE_VDEV_RX_FILTER,
WMI_SERVICE_BTCOEX,
WMI_SERVICE_CHECK_CAL_VERSION,
WMI_SERVICE_DBGLOG_WARN2,
WMI_SERVICE_BTCOEX_DUTY_CYCLE,
WMI_SERVICE_4_WIRE_COEX_SUPPORT,
WMI_SERVICE_EXTENDED_NSS_SUPPORT,
WMI_SERVICE_PROG_GPIO_BAND_SELECT,
WMI_SERVICE_SMART_LOGGING_SUPPORT,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
/* keep last */
WMI_SERVICE_MAX,
......@@ -310,6 +321,21 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
WMI_10_4_SERVICE_VDEV_RX_FILTER,
WMI_10_4_SERVICE_BTCOEX,
WMI_10_4_SERVICE_CHECK_CAL_VERSION,
WMI_10_4_SERVICE_DBGLOG_WARN2,
WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
WMI_10_4_SERVICE_TDLS,
WMI_10_4_SERVICE_TDLS_OFFCHAN,
WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
};
static inline char *wmi_service_name(int service_id)
......@@ -408,6 +434,16 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
SVCSTR(WMI_SERVICE_VDEV_RX_FILTER);
SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION);
SVCSTR(WMI_SERVICE_DBGLOG_WARN2);
SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE);
SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT);
SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT);
SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT);
SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
default:
return NULL;
}
......@@ -420,9 +456,20 @@ static inline char *wmi_service_name(int service_id)
__le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
BIT((svc_id) % (sizeof(u32))))
/* This extension is required to accommodate new services, current limit
* for wmi_services is 64 as target is using only 4-bits of each 32-bit
* wmi_service word. Extending this to make use of remaining unused bits
* for new services.
*/
#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) >= (len) && \
__le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \
BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4))
#define SVCMAP(x, y, len) \
do { \
if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
(WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
__set_bit(y, out); \
} while (0)
......@@ -663,6 +710,36 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_MODE_PUSH_PULL, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
WMI_SERVICE_TX_MODE_DYNAMIC, len);
SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER,
WMI_SERVICE_VDEV_RX_FILTER, len);
SVCMAP(WMI_10_4_SERVICE_BTCOEX,
WMI_SERVICE_BTCOEX, len);
SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION,
WMI_SERVICE_CHECK_CAL_VERSION, len);
SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2,
WMI_SERVICE_DBGLOG_WARN2, len);
SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
WMI_SERVICE_BTCOEX_DUTY_CYCLE, len);
SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
WMI_SERVICE_4_WIRE_COEX_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
WMI_SERVICE_EXTENDED_NSS_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
WMI_SERVICE_PROG_GPIO_BAND_SELECT, len);
SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
WMI_SERVICE_SMART_LOGGING_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TDLS,
WMI_SERVICE_TDLS, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN,
WMI_SERVICE_TDLS_OFFCHAN, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
}
#undef SVCMAP
......@@ -837,6 +914,29 @@ struct wmi_cmd_map {
u32 pdev_bss_chan_info_request_cmdid;
u32 pdev_enable_adaptive_cca_cmdid;
u32 ext_resource_cfg_cmdid;
u32 vdev_set_ie_cmdid;
u32 set_lteu_config_cmdid;
u32 atf_ssid_grouping_request_cmdid;
u32 peer_atf_ext_request_cmdid;
u32 set_periodic_channel_stats_cfg_cmdid;
u32 peer_bwf_request_cmdid;
u32 btcoex_cfg_cmdid;
u32 peer_tx_mu_txmit_count_cmdid;
u32 peer_tx_mu_txmit_rstcnt_cmdid;
u32 peer_gid_userpos_list_cmdid;
u32 pdev_check_cal_version_cmdid;
u32 coex_version_cfg_cmid;
u32 pdev_get_rx_filter_cmdid;
u32 pdev_extended_nss_cfg_cmdid;
u32 vdev_set_scan_nac_rssi_cmdid;
u32 prog_gpio_band_select_cmdid;
u32 config_smart_logging_cmdid;
u32 debug_fatal_condition_cmdid;
u32 get_tsf_timer_cmdid;
u32 pdev_get_tpc_table_cmdid;
u32 vdev_sifs_trigger_time_cmdid;
u32 pdev_wds_entry_list_cmdid;
u32 tdls_set_offchan_mode_cmdid;
};
/*
......@@ -1647,6 +1747,29 @@ enum wmi_10_4_cmd_id {
WMI_10_4_EXT_RESOURCE_CFG_CMDID,
WMI_10_4_VDEV_SET_IE_CMDID,
WMI_10_4_SET_LTEU_CONFIG_CMDID,
WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
WMI_10_4_PEER_BWF_REQUEST_CMDID,
WMI_10_4_BTCOEX_CFG_CMDID,
WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
WMI_10_4_COEX_VERSION_CFG_CMID,
WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
WMI_10_4_GET_TSF_TIMER_CMDID,
WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
WMI_10_4_TDLS_SET_STATE_CMDID,
WMI_10_4_TDLS_PEER_UPDATE_CMDID,
WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
};
......@@ -1710,6 +1833,18 @@ enum wmi_10_4_event_id {
WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
WMI_10_4_MU_REPORT_EVENTID,
WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID,
WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID,
WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID,
WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID,
WMI_10_4_ATF_PEER_STATS_EVENTID,
WMI_10_4_PDEV_GET_RX_FILTER_EVENTID,
WMI_10_4_NAC_RSSI_EVENTID,
WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID,
WMI_10_4_GET_TSF_TIMER_RESP_EVENTID,
WMI_10_4_PDEV_TPC_TABLE_EVENTID,
WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID,
WMI_10_4_TDLS_PEER_EVENTID,
WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
};
......@@ -2718,6 +2853,18 @@ struct wmi_resource_config_10_4 {
__le32 qwrap_config;
} __packed;
enum wmi_coex_version {
WMI_NO_COEX_VERSION_SUPPORT = 0,
/* 3 wire coex support*/
WMI_COEX_VERSION_1 = 1,
/* 2.5 wire coex support*/
WMI_COEX_VERSION_2 = 2,
/* 2.5 wire coex with duty cycle support */
WMI_COEX_VERSION_3 = 3,
/* 4 wire coex support*/
WMI_COEX_VERSION_4 = 4,
};
/**
* enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
* @WMI_10_4_LTEU_SUPPORT: LTEU config
......@@ -2726,6 +2873,14 @@ struct wmi_resource_config_10_4 {
* @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
* @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
* @WMI_10_4_PEER_STATS: Per station stats
* @WMI_10_4_VDEV_STATS: Per vdev stats
* @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable
* @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support enable/disable
* @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable
* @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
* @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
......@@ -2734,6 +2889,14 @@ enum wmi_10_4_feature_mask {
WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
WMI_10_4_PEER_STATS = BIT(5),
WMI_10_4_VDEV_STATS = BIT(6),
WMI_10_4_TDLS = BIT(7),
WMI_10_4_TDLS_OFFCHAN = BIT(8),
WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9),
WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
};
struct wmi_ext_resource_config_10_4_cmd {
......@@ -2741,6 +2904,22 @@ struct wmi_ext_resource_config_10_4_cmd {
__le32 host_platform_config;
/* see enum wmi_10_4_feature_mask */
__le32 fw_feature_bitmap;
/* WLAN priority GPIO number */
__le32 wlan_gpio_priority;
/* see enum wmi_coex_version */
__le32 coex_version;
/* COEX GPIO config */
__le32 coex_gpio_pin1;
__le32 coex_gpio_pin2;
__le32 coex_gpio_pin3;
/* number of vdevs allowed to perform tdls */
__le32 num_tdls_vdevs;
/* number of peers to track per TDLS vdev */
__le32 num_tdls_conn_table_entries;
/* number of tdls sleep sta supported */
__le32 max_tdls_concurrent_sleep_sta;
/* number of tdls buffer sta supported */
__le32 max_tdls_concurrent_buffer_sta;
};
/* strucutre describing host memory chunk. */
......@@ -5698,6 +5877,7 @@ struct wmi_tbtt_offset_event {
struct wmi_peer_create_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
__le32 peer_type;
} __packed;
enum wmi_peer_type {
......@@ -6556,6 +6736,22 @@ struct wmi_tdls_peer_update_cmd_arg {
#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
#define WMI_TDLS_PEER_SP_MASK 0x60
#define WMI_TDLS_PEER_SP_LSB 5
enum wmi_tdls_options {
WMI_TDLS_OFFCHAN_EN = BIT(0),
WMI_TDLS_BUFFER_STA_EN = BIT(1),
WMI_TDLS_SLEEP_STA_EN = BIT(2),
};
enum {
WMI_TDLS_PEER_QOS_AC_VO = BIT(0),
WMI_TDLS_PEER_QOS_AC_VI = BIT(1),
WMI_TDLS_PEER_QOS_AC_BK = BIT(2),
WMI_TDLS_PEER_QOS_AC_BE = BIT(3),
};
struct wmi_tdls_peer_capab_arg {
u8 peer_uapsd_queues;
u8 peer_max_sp;
......@@ -6571,6 +6767,79 @@ struct wmi_tdls_peer_capab_arg {
u32 pref_offchan_bw;
};
/* Wire format of WMI_10_4_TDLS_SET_STATE_CMDID: per-vdev TDLS enable
 * state plus firmware tunables (all fields little-endian; RSSI fields
 * carry signed dBm values).
 */
struct wmi_10_4_tdls_set_state_cmd {
	__le32 vdev_id;
	__le32 state; /* enum wmi_tdls_state */
	__le32 notification_interval_ms;
	__le32 tx_discovery_threshold;
	__le32 tx_teardown_threshold;
	__le32 rssi_teardown_threshold;
	__le32 rssi_delta;
	__le32 tdls_options; /* enum wmi_tdls_options bitmask */
	__le32 tdls_peer_traffic_ind_window;
	__le32 tdls_peer_traffic_response_timeout_ms;
	__le32 tdls_puapsd_mask;
	__le32 tdls_puapsd_inactivity_time_ms;
	__le32 tdls_puapsd_rx_frame_threshold;
	__le32 teardown_notification_ms;
	__le32 tdls_peer_kickout_threshold;
} __packed;

/* TDLS peer capability block embedded in the peer-update command.
 * peer_chan_list[1] is a deliberate one-entry placeholder: the command
 * generator sizes its allocation as sizeof(cmd) + (n - 1) channels, so
 * this must NOT be converted to a flexible array member.
 */
struct wmi_tdls_peer_capabilities {
	__le32 peer_qos; /* WMI_TDLS_PEER_QOS_AC_* | SM(sp, WMI_TDLS_PEER_SP) */
	__le32 buff_sta_support;
	__le32 off_chan_support;
	__le32 peer_curr_operclass;
	__le32 self_curr_operclass;
	__le32 peer_chan_len; /* number of entries in peer_chan_list */
	__le32 peer_operclass_len;
	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
	__le32 is_peer_responder;
	__le32 pref_offchan_num;
	__le32 pref_offchan_bw;
	struct wmi_channel peer_chan_list[1];
} __packed;

/* Wire format of WMI_10_4_TDLS_PEER_UPDATE_CMDID */
struct wmi_10_4_tdls_peer_update_cmd {
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
	__le32 peer_state; /* enum wmi_tdls_peer_state */
	__le32 reserved[4];
	struct wmi_tdls_peer_capabilities peer_capab;
} __packed;

/* reason codes reported by firmware in wmi_tdls_peer_event.peer_reason */
enum wmi_tdls_peer_reason {
	WMI_TDLS_TEARDOWN_REASON_TX,
	WMI_TDLS_TEARDOWN_REASON_RSSI,
	WMI_TDLS_TEARDOWN_REASON_SCAN,
	WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE,
	WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT,
	WMI_TDLS_TEARDOWN_REASON_BAD_PTR,
	WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE,
	WMI_TDLS_ENTER_BUF_STA,
	WMI_TDLS_EXIT_BUF_STA,
	WMI_TDLS_ENTER_BT_BUSY_MODE,
	WMI_TDLS_EXIT_BT_BUSY_MODE,
	WMI_TDLS_SCAN_STARTED_EVENT,
	WMI_TDLS_SCAN_COMPLETED_EVENT,
};

/* status codes reported in wmi_tdls_peer_event.peer_status */
enum wmi_tdls_peer_notification {
	WMI_TDLS_SHOULD_DISCOVER,
	WMI_TDLS_SHOULD_TEARDOWN,
	WMI_TDLS_PEER_DISCONNECTED,
	WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION,
};

/* Wire format of the WMI_10_4_TDLS_PEER_EVENTID firmware event */
struct wmi_tdls_peer_event {
	struct wmi_mac_addr peer_macaddr;
	/* see enum wmi_tdls_peer_notification*/
	__le32 peer_status;
	/* see enum wmi_tdls_peer_reason */
	__le32 peer_reason;
	__le32 vdev_id;
} __packed;
enum wmi_txbf_conf {
WMI_TXBF_CONF_UNSUPPORTED,
WMI_TXBF_CONF_BEFORE_ASSOC,
......
......@@ -1452,7 +1452,7 @@ int ath9k_init_debug(struct ath_hw *ah)
#endif
#ifdef CONFIG_ATH9K_DYNACK
debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
debugfs_create_file("ack_to", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_ackto);
#endif
debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
......
......@@ -388,6 +388,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
PCI_VENDOR_ID_DELL,
0x020B),
.driver_data = ATH9K_PCI_WOW },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
PCI_VENDOR_ID_DELL,
0x0300),
.driver_data = ATH9K_PCI_WOW },
/* Killer Wireless (2x2) */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
......
......@@ -289,6 +289,11 @@ static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
skb_tail_pointer(skb),
WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dxe->dst_addr_l)) {
dev_err(dev, "unable to map skb\n");
kfree_skb(skb);
return -ENOMEM;
}
ctl->skb = skb;
return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册