Commit 75ae83d6 authored by John W. Linville

Merge tag 'for-linville-20131001' of git://github.com/kvalo/ath

@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, *param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, param: %d)\n",
__func__, ar, address, *param);
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
@@ -338,33 +338,19 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
return ret;
}
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
unsigned int nbytes, u32 flags)
{
unsigned int num_items = sendlist->num_items;
struct ce_sendlist_item *item;
item = &sendlist->item[num_items];
item->data = buffer;
item->u.nbytes = nbytes;
item->flags = flags;
sendlist->num_items++;
}
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
struct ce_sendlist *sendlist,
unsigned int transfer_id)
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_sendlist_item *item;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int num_items = sendlist->num_items;
unsigned int sw_index;
unsigned int write_index;
int i, delta, ret = -ENOMEM;
int delta, ret = -ENOMEM;
spin_lock_bh(&ar_pci->ce_lock);
@@ -373,30 +359,12 @@ int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
if (delta >= num_items) {
/*
* Handle all but the last item uniformly.
*/
for (i = 0; i < num_items - 1; i++) {
item = &sendlist->item[i];
ret = ath10k_ce_send_nolock(ce_state,
CE_SENDLIST_ITEM_CTXT,
(u32) item->data,
item->u.nbytes, transfer_id,
item->flags |
CE_SEND_FLAG_GATHER);
if (ret)
ath10k_warn("CE send failed for item: %d\n", i);
}
/*
* Provide valid context pointer for final item.
*/
item = &sendlist->item[i];
if (delta >= 1) {
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
(u32) item->data, item->u.nbytes,
transfer_id, item->flags);
paddr, nbytes,
transfer_id, flags);
if (ret)
ath10k_warn("CE send failed for last item: %d\n", i);
ath10k_warn("CE send failed: %d\n", ret);
}
spin_unlock_bh(&ar_pci->ce_lock);
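/* Aside (illustration, not part of the patch): the free-slot check above
 * relies on power-of-two ring arithmetic. Assuming the usual
 * "(to - from) & mask" definition of CE_RING_DELTA (not shown in this
 * hunk), a standalone check: */
#include <assert.h>
#define RING_DELTA(mask, from, to) (((int)(to) - (int)(from)) & (mask))
int main(void)
{
	unsigned int mask = 7; /* 8-entry ring, nentries_mask = nentries - 1 */
	/* write index 5, software read index 2: slots 5,6,7,0 are free
	 * (one slot is always kept empty to tell full from empty) */
	assert(RING_DELTA(mask, 5, 2 - 1) == 4);
	/* write index one behind read index: ring is full, delta is 0 */
	assert(RING_DELTA(mask, 1, 2 - 1) == 0);
	return 0;
}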
@@ -742,11 +710,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
void *transfer_context;
u32 buf;
unsigned int nbytes;
unsigned int id;
unsigned int flags;
int ret;
ret = ath10k_pci_wake(ar);
@@ -759,38 +722,15 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
if (ce_state->recv_cb) {
/*
* Pop completed recv buffers and call the registered
* recv callback for each
*/
while (ath10k_ce_completed_recv_next_nolock(ce_state,
&transfer_context,
&buf, &nbytes,
&id, &flags) == 0) {
spin_unlock_bh(&ar_pci->ce_lock);
ce_state->recv_cb(ce_state, transfer_context, buf,
nbytes, id, flags);
spin_lock_bh(&ar_pci->ce_lock);
}
}
spin_unlock_bh(&ar_pci->ce_lock);
if (ce_state->send_cb) {
/*
* Pop completed send buffers and call the registered
* send callback for each
*/
while (ath10k_ce_completed_send_next_nolock(ce_state,
&transfer_context,
&buf,
&nbytes,
&id) == 0) {
spin_unlock_bh(&ar_pci->ce_lock);
ce_state->send_cb(ce_state, transfer_context,
buf, nbytes, id);
spin_lock_bh(&ar_pci->ce_lock);
}
}
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@@ -881,11 +821,7 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
}
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@@ -898,12 +834,7 @@ void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
}
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags))
void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1010,6 +941,10 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
@@ -1091,6 +1026,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@@ -116,41 +115,14 @@ struct ath10k_ce_pipe {
u32 ctrl_addr;
void (*send_cb) (struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id);
void (*recv_cb) (struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
void (*send_cb)(struct ath10k_ce_pipe *);
void (*recv_cb)(struct ath10k_ce_pipe *);
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
};
struct ce_sendlist_item {
/* e.g. buffer or desc list */
dma_addr_t data;
union {
/* simple buffer */
unsigned int nbytes;
/* Rx descriptor list */
unsigned int ndesc;
} u;
/* externally-specified flags; OR-ed with internal flags */
u32 flags;
};
struct ce_sendlist {
unsigned int num_items;
struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
/* Copy Engine settable attributes */
struct ce_attr;
@@ -181,20 +153,9 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
unsigned int flags);
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
u32 buffer,
unsigned int nbytes,
/* OR-ed with internal flags */
u32 flags);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
@@ -206,10 +167,10 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
/* 14 bits */
unsigned int transfer_id);
void *per_transfer_context,
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags);
/*==================Recv=======================*/
@@ -228,12 +189,7 @@ int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
u32 buffer);
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags));
void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@@ -325,16 +281,6 @@ struct ce_attr {
unsigned int dest_nentries;
};
/*
* When using sendlist_send to transfer multiple buffer fragments, the
* transfer context of each fragment, except last one, will be filled
* with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
* each fragment done with send and the transfer context would be
* CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
* status of a send completion.
*/
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
@@ -53,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
@@ -101,7 +101,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@@ -203,8 +203,8 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE,
"ath10k: Board extended Data download addr: 0x%x\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
@@ -435,6 +435,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
/* Set the UART baud rate to 19200. */
ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
if (ret) {
ath10k_warn("could not set the baud rate (%d)\n", ret);
return ret;
}
ath10k_info("UART prints enabled\n");
return 0;
}
@@ -630,6 +637,10 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
status = ath10k_debug_start(ar);
if (status)
goto err_disconnect_htc;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
@@ -647,6 +658,7 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@@ -710,6 +722,9 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
{
u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ar->chip_id, hw_revision);
/* Check that we are not using hw1.0 (some of them have same pci id
* as hw2.0) before doing anything else as ath10k crashes horribly
* due to missing hw1.0 workarounds. */
@@ -777,6 +792,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
@@ -52,18 +52,12 @@ struct ath10k_skb_cb {
struct {
u8 vdev_id;
u16 msdu_id;
u8 tid;
bool is_offchan;
bool is_conf;
bool discard;
bool no_ack;
u8 refcount;
struct sk_buff *txfrag;
struct sk_buff *msdu;
} __packed htt;
/* 4 bytes left on 64bit arch */
u8 frag_len;
u8 pad_len;
} __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -112,11 +106,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
atomic_t pending_tx_count;
wait_queue_head_t wq;
struct sk_buff_head wmi_event_list;
struct work_struct wmi_event_work;
wait_queue_head_t tx_credits_wq;
};
struct ath10k_peer_stat {
@@ -203,6 +193,7 @@ struct ath10k_vif {
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -246,6 +237,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -517,6 +520,117 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->debug.htt_stats_mask == 0)
/* htt stats are disabled */
return 0;
if (ar->state != ATH10K_STATE_ON)
return 0;
cookie = get_jiffies_64();
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
cookie);
if (ret) {
ath10k_warn("failed to send htt stats request: %d\n", ret);
return ret;
}
queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
return 0;
}
static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k,
debug.htt_stats_dwork.work);
mutex_lock(&ar->conf_mutex);
ath10k_debug_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
}
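/* Aside (illustration, not part of the patch): the two functions above
 * form the common self-rescheduling delayed_work pattern -- the request
 * helper re-arms the work, so polling continues until the mask is
 * cleared or the device leaves the ON state. Reduced to a generic
 * sketch with invented names (poll_dwork, poll_handler, POLL_MS): */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#define POLL_MS 1000
static struct delayed_work poll_dwork;
static void poll_handler(struct work_struct *work)
{
	/* ... periodic job goes here (above: the HTT stats request) ... */

	/* re-arming from the handler is what makes the work periodic */
	schedule_delayed_work(&poll_dwork, msecs_to_jiffies(POLL_MS));
}
/* start: INIT_DELAYED_WORK(&poll_dwork, poll_handler);
 *        schedule_delayed_work(&poll_dwork, 0);
 * stop:  cancel_delayed_work_sync(&poll_dwork); -- as ath10k_debug_stop
 *        does below */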
static ssize_t ath10k_read_htt_stats_mask(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_write_htt_stats_mask(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long mask;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &mask);
if (ret)
return ret;
/* max 8 bit masks (for now) */
if (mask > 0xff)
return -E2BIG;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats_mask = mask;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
goto out;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_htt_stats_mask = {
.read = ath10k_read_htt_stats_mask,
.write = ath10k_write_htt_stats_mask,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
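/* Aside (illustration, not part of the patch): driving the new debugfs
 * file from userspace. The path is an assumption -- debugfs mounted at
 * /sys/kernel/debug and the device registered as phy0; adjust for your
 * system. The write handler above parses the value with
 * kstrtoul_from_user and rejects masks wider than 8 bits. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath10k/htt_stats_mask";
	int fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 0x3: request the first two stat types; anything > 0xff -> -E2BIG */
	if (write(fd, "0x3", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}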
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
return 0;
}
void ath10k_debug_stop(struct ath10k *ar)
{
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -525,6 +639,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -539,8 +656,12 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_htt_stats_mask);
return 0;
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
@@ -27,11 +27,12 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_CORE = 0x00000020,
ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -42,6 +43,8 @@ extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
@@ -50,6 +53,15 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_stop(struct ath10k *ar)
{
}
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
struct sk_buff *skb,
u8 credits)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
int ret;
ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err;
ret = ath10k_hif_send_head(htc->ar,
ep->ul_pipe_id,
ep->eid,
skb->len,
skb);
if (unlikely(ret))
goto err;
return 0;
err:
ath10k_warn("HTC issue failed: %d\n", ret);
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
/* this is the simplest way to handle out-of-resources for non-credit
* based endpoints. credit based endpoints can still get -ENOSR, but
* this is highly unlikely as credit reservation should prevent that */
if (ret == -ENOSR) {
spin_lock_bh(&htc->tx_lock);
__skb_queue_head(&ep->tx_queue, skb);
spin_unlock_bh(&htc->tx_lock);
return ret;
}
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
return ret;
}
static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
u8 *credits)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
int credits_required;
int remainder;
unsigned int transfer_len;
lockdep_assert_held(&htc->tx_lock);
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
return NULL;
skb_cb = ATH10K_SKB_CB(skb);
transfer_len = skb->len;
if (likely(transfer_len <= htc->target_credit_size)) {
credits_required = 1;
} else {
/* figure out how many credits this message requires */
credits_required = transfer_len / htc->target_credit_size;
remainder = transfer_len % htc->target_credit_size;
if (remainder)
credits_required++;
}
ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
credits_required, ep->tx_credits);
if (ep->tx_credits < credits_required) {
__skb_queue_head(&ep->tx_queue, skb);
return NULL;
}
ep->tx_credits -= credits_required;
*credits = credits_required;
return skb;
}
static void ath10k_htc_send_work(struct work_struct *work)
{
struct ath10k_htc_ep *ep = container_of(work,
struct ath10k_htc_ep, send_work);
struct ath10k_htc *htc = ep->htc;
struct sk_buff *skb;
u8 credits = 0;
int ret;
while (true) {
if (ep->ul_is_polled)
ath10k_htc_send_complete_check(ep, 0);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credit_flow_enabled)
skb = ath10k_htc_get_skb_credit_based(htc, ep,
&credits);
else
skb = __skb_dequeue(&ep->tx_queue);
spin_unlock_bh(&htc->tx_lock);
if (!skb)
break;
ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
if (ret == -ENOSR)
break;
}
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
int credits = 0;
int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
/* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
spin_unlock_bh(&htc->tx_lock);
__skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
spin_unlock_bh(&htc->tx_lock);
queue_work(htc->ar->workqueue, &ep->send_work);
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
spin_unlock_bh(&htc->tx_lock);
}
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err_credits;
ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
skb->len, skb);
if (ret)
goto err_unmap;
return 0;
err_unmap:
ath10k_skb_unmap(htc->ar->dev, skb);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
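/* Aside (illustration, not part of the patch): the DIV_ROUND_UP() above
 * replaces the removed quotient-plus-remainder computation from
 * ath10k_htc_get_skb_credit_based(). A standalone equivalence check
 * (the credit size value is invented): */
#include <assert.h>
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
static int credits_old(unsigned int len, unsigned int credit_size)
{
	int credits = len / credit_size;
	if (len % credit_size)
		credits++;
	return credits;
}
int main(void)
{
	unsigned int len, credit_size = 1792;
	for (len = 1; len < 8 * credit_size; len++)
		assert(credits_old(len, credit_size) ==
		       (int)DIV_ROUND_UP(len, credit_size));
	return 0;
}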
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
/* note: when using TX credit flow, the re-checking of queues happens
* when credits flow back from the target. in the non-TX credit case,
* we recheck after the packet completes */
spin_lock_bh(&htc->tx_lock);
if (!ep->tx_credit_flow_enabled && !htc->stopped)
queue_work(ar->workqueue, &ep->send_work);
spin_unlock_bh(&htc->tx_lock);
return 0;
}
/* flush endpoint TX queue */
static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
spin_lock_bh(&htc->tx_lock);
for (;;) {
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
break;
skb_cb = ATH10K_SKB_CB(skb);
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
}
spin_unlock_bh(&htc->tx_lock);
cancel_work_sync(&ep->send_work);
}
/***********/
/* Receive */
/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
queue_work(htc->ar->workqueue, &ep->send_work);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
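/* Aside (illustration, not part of the patch): the unlock/call/relock
 * dance above exists because ep_tx_credits may re-enter the HTC layer
 * (e.g. ath10k_htc_send takes tx_lock to charge credits). A minimal
 * sketch of the pattern: */
#include <linux/spinlock.h>
static DEFINE_SPINLOCK(state_lock);
static void process_report(void (*cb)(void))
{
	spin_lock_bh(&state_lock);
	/* ... update credit counters under the lock ... */
	if (cb) {
		spin_unlock_bh(&state_lock);
		cb();	/* may take state_lock itself without deadlocking */
		spin_lock_bh(&state_lock);
		/* state may have changed while unlocked; re-check if needed */
	}
	spin_unlock_bh(&state_lock);
}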
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ATH10K_DBG_HTC,
"HTC Service %s does not allocate target credits\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -873,19 +760,19 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
if (status)
return status;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ATH10K_DBG_HTC,
"EP %d UL polled: %d, DL polled: %d\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s eid: %d TX flow control disabled\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
int i;
struct ath10k_htc_ep *ep;
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ath10k_htc_flush_endpoint_tx(htc, ep);
}
ath10k_hif_stop(htc->ar);
}
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
struct sk_buff_head tx_queue;
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
@@ -19,6 +19,7 @@
#define _HTT_H_
#include <linux/bug.h>
#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
#define ATH10K_HTT_MAX_NUM_REFILL 16
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
@@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include <linux/log2.h>
@@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
int ret, num_to_fill;
int ret, num_deficit, num_to_fill;
/* Refilling the whole RX ring buffer proves to be a bad idea. The
* reason is RX may take up significant amount of CPU cycles and starve
* other tasks, e.g. TX on an ethernet device while acting as a bridge
* with ath10k wlan interface. This ended up with very poor performance
* once the host system was overwhelmed with RX on ath10k.
*
* By limiting the number of refills the replenishing occurs
* progressively. This in turn makes use of the fact that tasklets are
* processed in FIFO order. This means actual RX processing can starve
* out refilling. If there are not enough buffers on the RX ring FW will
* not report RX until it is refilled with enough buffers. This
* automatically balances load wrt CPU power.
*
* This probably comes at a cost of lower maximum throughput but
* improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
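/* Aside (illustration, not part of the patch): the capped refill in
 * miniature. Each pass fills at most ATH10K_HTT_MAX_NUM_REFILL buffers
 * and re-queues itself while a deficit remains (the recursive call
 * stands in for tasklet_schedule); the fill numbers are invented. */
#include <stdio.h>
#define MAX_NUM_REFILL 16
static int fill_level = 1000, fill_cnt = 950;
static void replenish(void)
{
	int num_deficit = fill_level - fill_cnt;
	int num_to_fill = num_deficit < MAX_NUM_REFILL ?
			  num_deficit : MAX_NUM_REFILL;
	fill_cnt += num_to_fill;	/* ath10k_htt_rx_ring_fill_n() */
	num_deficit -= num_to_fill;
	printf("filled %d, deficit %d\n", num_to_fill, num_deficit);
	if (num_deficit > 0)
		replenish();		/* tasklet_schedule() */
}
int main(void)
{
	replenish();
	return 0;
}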
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -590,142 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
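/* Aside (illustration, not part of the patch): with __packed these two
 * headers match the on-air sizes the decap code below relies on --
 * 8 bytes of LLC/SNAP and 14 bytes (6 + 6 + 2) of A-MSDU subframe
 * header. A C11 compile-time check (not in the original source): */
_Static_assert(sizeof(struct rfc1042_hdr) == 8,
	       "LLC/SNAP header must be 8 bytes");
_Static_assert(sizeof(struct amsdu_subframe_hdr) == 14,
	       "A-MSDU subframe header must be 6 + 6 + 2 bytes");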
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
struct sk_buff *amsdu;
struct sk_buff *first;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* FIXME: No idea what assumptions are safe here. Need logs */
if ((fmt == RX_MSDU_DECAP_RAW && skb->next)) {
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
return -ENOTSUPP;
}
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(hdr_buf, hdr, hdr_len);
hdr = (struct ieee80211_hdr *)hdr_buf;
/* A-MSDU max is a little less than 8K */
amsdu = dev_alloc_skb(8*1024);
if (!amsdu) {
ath10k_warn("A-MSDU allocation failed\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
return -ENOMEM;
}
if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
int hdrlen;
hdr = (void *)rxd->rx_hdr_status;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
}
/* FIXME: Hopefully this is a temporary measure.
*
* Reporting individual A-MSDU subframes means each reported frame
* shares the same sequence number.
*
* mac80211 drops frames it recognizes as duplicates, i.e.
* retransmission flag is set and sequence number matches sequence
* number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
* "Duplicate detection and recovery")
*
* To avoid frames being dropped, clear the retransmission flag for all
* received A-MSDUs.
*
* Worst case: actual duplicate frames will be reported but this should
* still be handled gracefully by other OSI/ISO layers. */
hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
int decap_len = 0;
int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
if (skb == first) {
/* We receive linked A-MSDU subframe skbuffs. The
* first one contains the original 802.11 header (and
* possible crypto param) in the RX descriptor. The
* A-MSDU subframe header follows that. Each part is
* aligned to a 4-byte boundary. */
hdr = (void *)amsdu->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
decap_hdr += roundup(hdr_len, 4);
decap_hdr += roundup(crypto_len, 4);
}
skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
/* When fmt == RX_MSDU_DECAP_8023_SNAP_LLC:
*
* SNAP 802.3 consists of:
* [dst:6][src:6][len:2][dsap:1][ssap:1][ctl:1][snap:5]
* [data][fcs:4].
*
* Since this overlaps with A-MSDU header (da, sa, len)
* there's nothing extra to do. */
if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
/* Ethernet2 decap inserts ethernet header in place of
* A-MSDU subframe header. */
skb_pull(skb, 6 + 6 + 2);
/* A-MSDU subframe header length */
decap_len += 6 + 6 + 2;
/* Ethernet2 decap also strips the LLC/SNAP so we need
* to re-insert it. The LLC/SNAP follows A-MSDU
* subframe header. */
/* FIXME: Not all LLCs are 8 bytes long */
decap_len += 8;
memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
/* First frame in an A-MSDU chain has more decapped data. */
if (skb == first) {
len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
4);
decap_hdr += len;
}
if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
/* Native Wifi decap inserts regular 802.11 header
* in place of A-MSDU subframe header. */
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
/* A-MSDU subframe header length */
decap_len += 6 + 6 + 2;
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)hdr_buf;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
}
/* original A-MSDU header has the bit set but we're
* not including A-MSDU subframe header */
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
if (fmt == RX_MSDU_DECAP_RAW)
skb_trim(skb, skb->len - 4); /* remove FCS */
/* original 802.11 header has a different DA */
memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* strip ethernet header and insert decapped 802.11
* header, amsdu subframe header and rfc1042 header */
memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
len = 0;
len += sizeof(struct rfc1042_hdr);
len += sizeof(struct amsdu_subframe_hdr);
/* A-MSDU subframes are padded to 4 bytes
* but relative to first subframe, not the whole MPDU */
if (skb->next && ((decap_len + skb->len) & 3)) {
int padlen = 4 - ((decap_len + skb->len) & 3);
memset(skb_put(amsdu, padlen), 0, padlen);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, len), decap_hdr, len);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
/* insert decapped 802.11 header making a single
* A-MSDU */
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
}
info->skb = skb;
info->encrypt_type = enctype;
skb = skb->next;
}
info->skb = amsdu;
info->encrypt_type = enctype;
info->skb->next = NULL;
ath10k_htt_rx_free_msdu_chain(first);
ath10k_process_rx(htt->ar, info);
}
return 0;
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
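/* Aside (illustration, not part of the patch): the padding logic above
 * (from the pre-patch A-MSDU assembly) is computed against the running
 * A-MSDU length ("relative to first subframe"), not per subframe. A
 * standalone check of the arithmetic: */
#include <assert.h>
static int pad_to_4(int running_len)
{
	return (running_len & 3) ? 4 - (running_len & 3) : 0;
}
int main(void)
{
	assert(pad_to_4(46) == 2);	/* next subframe starts at 48 */
	assert(pad_to_4(48) == 0);	/* already 4-byte aligned */
	assert((46 + pad_to_4(46)) % 4 == 0);
	return 0;
}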
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
int hdr_len;
void *rfc1042;
/* This shouldn't happen. If it does then it may be a FW bug. */
if (skb->next) {
@@ -739,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
skb_trim(skb, skb->len - 4);
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
/* nothing to do here */
/* Pull decapped header */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb_pull(skb, hdr_len);
/* Push original header */
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* macaddr[6] + macaddr[6] + ethertype[2] */
skb_pull(skb, 6 + 6 + 2);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
/* macaddr[6] + macaddr[6] + len[2] */
/* we don't need this for non-A-MSDU */
skb_pull(skb, 6 + 6 + 2);
break;
}
if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
void *llc;
int llclen;
/* strip ethernet header and insert decapped 802.11 header and
* rfc1042 header */
llclen = 8;
llc = hdr;
llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
rfc1042 = hdr;
rfc1042 += roundup(hdr_len, 4);
rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
skb_push(skb, llclen);
memcpy(skb->data, llc, llclen);
}
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
rfc1042, sizeof(struct rfc1042_hdr));
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
/* remove A-MSDU subframe header and insert
* decapped 802.11 header. rfc1042 header is already there */
if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
int len = ieee80211_hdrlen(hdr->frame_control);
skb_push(skb, len);
memcpy(skb->data, hdr, len);
skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
}
info->skb = skb;
info->encrypt_type = enctype;
return 0;
ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -853,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
int ret;
int ip_summed;
memset(&info, 0, sizeof(info));
@@ -929,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
/* The skb is not yet processed and it may be
* reallocated. Since the offload is in the original
* skb extract the checksum now and assign it later */
ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -946,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
ret = ath10k_htt_rx_amsdu(htt, &info);
ath10k_htt_rx_amsdu(htt, &info);
else
ret = ath10k_htt_rx_msdu(htt, &info);
if (ret && !info.fcs_err) {
ath10k_warn("error processing msdus %d\n", ret);
dev_kfree_skb_any(info.skb);
continue;
}
if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
info.skb->ip_summed = ip_summed;
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
ath10k_htt_rx_msdu(htt, &info);
}
}
ath10k_htt_rx_msdu_buff_replenish(htt);
tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
......@@ -1139,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
ath10k_txrx_tx_completed(htt, &tx_done);
ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
......@@ -1173,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
ath10k_txrx_tx_completed(htt, &tx_done);
ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@@ -1198,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(skb->data, skb->len);
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
struct sk_buff *txdesc;
struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
txdesc = htt->pending_tx[msdu_id];
if (!txdesc)
continue;
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
ATH10K_SKB_CB(txdesc)->htt.discard = true;
ath10k_txrx_tx_unref(htt, txdesc);
ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_htt *htt = &ar->htt;
if (skb_cb->htt.is_conf) {
dev_kfree_skb_any(skb);
return;
}
if (skb_cb->is_aborted) {
skb_cb->htt.discard = true;
/* if the skbuff is aborted we need to make sure we'll free up
* the tx resources, we can't simply run tx_unref() 2 times
* because if htt tx completion came in earlier we'd access
* unallocated memory */
if (skb_cb->htt.refcount > 1)
skb_cb->htt.refcount = 1;
}
ath10k_txrx_tx_unref(htt, skb);
dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0, ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
skb = ath10k_htc_alloc_skb(len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
req = &cmd->stats_req;
memset(req, 0, sizeof(*req));
/* currently we support only max 8 bit masks so no need to worry
* about endian support */
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn("failed to send htt type stats request: %d", ret);
dev_kfree_skb_any(skb);
return ret;
}
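/* Aside (illustration, not part of the patch): the 64-bit cookie above
 * is carried as two little-endian 32-bit halves and reassembles
 * losslessly. Standalone round-trip check (the cookie value is invented;
 * the driver uses get_jiffies_64()): */
#include <assert.h>
#include <stdint.h>
int main(void)
{
	uint64_t cookie = 0x0123456789abcdefULL;
	uint32_t lsb = cookie & 0xffffffff;
	uint32_t msb = (cookie & 0xffffffff00000000ULL) >> 32;
	assert((((uint64_t)msb << 32) | lsb) == cookie);
	return 0;
}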
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->htt.vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
txdesc = ath10k_htc_alloc_skb(len);
if (!txdesc) {
res = -ENOMEM;
goto err;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
goto err_tx_dec;
}
htt->pending_tx[msdu_id] = txdesc;
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
}
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.msdu = msdu;
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
goto err_unmap_msdu;
return 0;
err:
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
struct sk_buff *txfrag = NULL;
bool use_frags;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
u8 tid;
int prefetch_len, desc_len, frag_len;
dma_addr_t frags_paddr;
int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@@ -387,73 +394,73 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
goto err;
goto err_free_msdu_id;
}
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
if (htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control)) {
txfrag = dev_alloc_skb(frag_len);
if (!txfrag) {
res = -ENOMEM;
goto err;
}
}
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
goto err;
goto err_free_txdesc;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
if (use_frags) {
skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
skb_cb->htt.pad_len = (unsigned long)msdu->data -
round_down((unsigned long)msdu->data, 4);
skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
} else {
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
}
htt->pending_tx[msdu_id] = txdesc;
spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
goto err_pull_txfrag;
if (use_frags) {
dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
if (htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control)) {
/* tx fragment list must be terminated with zero-entry */
skb_put(txfrag, frag_len);
tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
tx_frags[0].len = __cpu_to_le32(msdu->len);
tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
skb_cb->htt.frag_len +
skb_cb->htt.pad_len);
tx_frags[0].len = __cpu_to_le32(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
tx_frags[1].paddr = __cpu_to_le32(0);
tx_frags[1].len = __cpu_to_le32(0);
res = ath10k_skb_map(dev, txfrag);
if (res)
goto err;
ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(txfrag)->paddr);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
txfrag->data, frag_len);
dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
}
ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
@@ -463,7 +470,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -474,15 +480,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
if (htt->target_version_major >= 3 &&
ieee80211_is_mgmt(hdr->frame_control))
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
if (use_frags)
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
else
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
@@ -491,52 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
if (htt->target_version_major >= 3 &&
ieee80211_is_mgmt(hdr->frame_control))
frags_paddr = ATH10K_SKB_CB(msdu)->paddr;
else
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
cmd->data_tx.len = __cpu_to_le16(msdu->len);
cmd->data_tx.len = __cpu_to_le16(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.txfrag = txfrag;
skb_cb->htt.msdu = msdu;
memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
goto err_unmap_msdu;
return 0;
err:
if (txfrag)
ath10k_skb_unmap(dev, txfrag);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (txfrag)
dev_kfree_skb_any(txfrag);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
ath10k_htt_tx_dec_pending(htt);
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
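/* Aside (illustration, not part of the patch): the frag_len/pad_len
 * bookkeeping above pushes the fragment list into the msdu's own
 * headroom so one DMA mapping covers both; pad_len re-aligns the list to
 * 4 bytes. A standalone check of the pointer arithmetic (the address is
 * invented): */
#include <assert.h>
#include <stdint.h>
int main(void)
{
	uintptr_t data = 0x1002;	/* msdu->data before skb_push() */
	unsigned int frag_len = 16;	/* two 8-byte fragment descriptors */
	unsigned int pad_len = data - (data & ~(uintptr_t)3); /* round_down */
	assert(pad_len == 2);
	/* after skb_push(msdu, frag_len + pad_len) the fragment list
	 * starts 4-byte aligned, and the frame itself still begins at
	 * paddr + frag_len + pad_len, matching tx_frags[0].paddr above */
	assert(((data - frag_len - pad_len) & 3) == 0);
	return 0;
}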
@@ -74,7 +74,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
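The 4-byte point above is easy to check by hand: with raw 802.11 decap, a QoS data header (26 bytes) plus LLC/SNAP (8 bytes) puts the IP header at offset 34, which is only 2-byte aligned. The sizes below are standard 802.11 figures; the check itself is plain arithmetic in a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int dot11_qos_hdr = 26;	/* 24-byte header + 2-byte QoS ctl */
	unsigned int llc_snap = 8;		/* RFC1042 encapsulation */
	unsigned int ip_off = dot11_qos_hdr + llc_snap;	/* 34 */

	printf("IP header offset %u, 4-byte aligned: %s\n",
	       ip_off, (ip_off % 4 == 0) ? "yes" : "no");
	return 0;
}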
......
......@@ -460,6 +460,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s\n",
arg.vdev_id, arg.channel.freq,
ath10k_wmi_phymode_str(arg.channel.mode));
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
......@@ -604,7 +609,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
......@@ -636,7 +641,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
......@@ -665,7 +670,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
......@@ -749,14 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
psmode);
if (ar_iter->ret)
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
}
/**********************/
......@@ -946,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
......@@ -966,7 +972,7 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
......@@ -1045,7 +1051,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
......@@ -1073,8 +1080,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
/* FIXME: add VHT */
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
......@@ -1088,7 +1093,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
if (sta->ht_cap.ht_supported) {
/*
* Check VHT first.
*/
if (sta->vht_cap.vht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AC_VHT80;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
phymode = MODE_11AC_VHT20;
} else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
......@@ -1102,6 +1117,9 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
sta->addr, ath10k_wmi_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
......@@ -1159,15 +1177,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
else
ath10k_dbg(ATH10K_DBG_MAC,
"VDEV: %d associated, BSSID: %pM, AID: %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
......@@ -1188,10 +1206,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
arvif->vdev_id);
/* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
if (!ret)
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
......@@ -1200,12 +1219,10 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
arvif->vdev_id, ret);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
ath10k_wmi_flush_tx(ar);
/* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_index = 0;
}
......@@ -1330,8 +1347,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
"%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
__func__, ch - arg.channels, arg.n_channels,
"mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
......@@ -1431,7 +1448,8 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
if (key->keyidx == arvif->def_wep_key_index)
return;
ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
arvif->vdev_id, key->keyidx);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DEF_KEYID,
......@@ -1534,7 +1552,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
......@@ -1546,6 +1564,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
spin_unlock_bh(&ar->data_lock);
if (peer)
/* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
......@@ -1643,8 +1662,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
ath10k_wmi_flush_tx(ar);
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
......@@ -1678,10 +1695,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
/* make sure we submit the command so the completion
* timeout makes sense */
ath10k_wmi_flush_tx(ar);
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
......@@ -1744,7 +1757,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
ATH10K_SKB_CB(skb)->htt.tid = tid;
......@@ -1886,7 +1899,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
......@@ -1903,7 +1916,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
......@@ -1975,7 +1987,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
......@@ -2054,7 +2066,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
......@@ -2066,6 +2083,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
......@@ -2107,18 +2127,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
ar->monitor_vdev_id);
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
else
ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
ar->monitor_vdev_id);
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
else
ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
......@@ -2143,41 +2165,41 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BEACON_INTERVAL,
arvif->beacon_interval);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d beacon_interval %d\n",
arvif->vdev_id, arvif->beacon_interval);
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Beacon interval: %d set for VDEV: %d\n",
arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
ath10k_dbg(ATH10K_DBG_MAC,
"vdev %d set beacon tx mode to staggered\n",
arvif->vdev_id);
ret = ath10k_wmi_pdev_set_param(ar,
WMI_PDEV_PARAM_BEACON_TX_MODE,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d dtim_period %d\n",
arvif->vdev_id, arvif->dtim_period);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_PERIOD,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set dtim period: %d for VDEV: %d\n",
arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
......@@ -2190,16 +2212,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Added peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION) {
/*
......@@ -2209,11 +2230,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
/* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
if (!ret)
ath10k_dbg(ATH10K_DBG_MAC,
"VDEV: %d started with BSSID: %pM\n",
arvif->vdev_id, info->bssid);
}
/*
......@@ -2237,16 +2259,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
arvif->vdev_id, cts_prot);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_ENABLE_RTSCTS,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set CTS prot: %d for VDEV: %d\n",
cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
......@@ -2257,16 +2278,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
arvif->vdev_id, slottime);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_SLOT_TIME,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set slottime: %d for VDEV: %d\n",
slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
......@@ -2276,16 +2296,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d preamble %dn",
arvif->vdev_id, preamble);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_PREAMBLE,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set preamble: %d for VDEV: %d\n",
preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
......@@ -2476,27 +2496,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta)\n",
arvif->vdev_id, sta->addr);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Removed peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
......@@ -2507,14 +2526,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Station %pM moved to assoc state\n",
sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
......@@ -2522,14 +2540,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Station %pM moved to disassociated state\n",
sta->addr);
}
mutex_unlock(&ar->conf_mutex);
......@@ -2749,14 +2766,13 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
arvif->vdev_id, rts);
ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set RTS threshold: %d for VDEV: %d\n",
rts, arvif->vdev_id);
}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
......@@ -2791,14 +2807,13 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n",
arvif->vdev_id, frag);
ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set frag threshold: %d for VDEV: %d\n",
frag, arvif->vdev_id);
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
......@@ -2838,8 +2853,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
empty = bitmap_empty(ar->htt.used_msdu_ids,
ar->htt.max_num_pending_tx);
empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
......@@ -3328,6 +3342,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
/* MSDU can have HTT TX fragment pushed in front. The additional 4
* bytes are used for padding/alignment if necessary. */
ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
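The headroom arithmetic works out to 20 bytes under the usual two-__le32 fragment descriptor layout, sketched below as an assumption rather than quoted from htt.h: two 8-byte descriptors (the fragment entry plus a zeroed list terminator) and 4 bytes of alignment pad.

/* assumed layout; 2 * sizeof(struct htt_data_tx_desc_frag) + 4 == 20 */
struct htt_data_tx_desc_frag {
	__le32 paddr;
	__le32 len;
} __packed;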
......
......@@ -612,31 +612,20 @@ struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 ce_data,
unsigned int nbytes,
unsigned int transfer_id)
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
bool process = false;
do {
/*
* For the send completion of an item in sendlist, just
* increment num_sends_allowed. The upper layer callback will
* be triggered when last fragment is done with send.
*/
if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
spin_lock_bh(&pipe_info->pipe_lock);
pipe_info->num_sends_allowed++;
spin_unlock_bh(&pipe_info->pipe_lock);
continue;
}
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
......@@ -655,38 +644,28 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
process = true;
} while (ath10k_ce_completed_send_next(ce_state,
&transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0);
/*
* If only some of the items within a sendlist have completed,
* don't invoke completion processing until the entire sendlist
* has been sent.
*/
if (!process)
return;
}
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
void *transfer_context, u32 ce_data,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
do {
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
......@@ -709,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
} while (ath10k_ce_completed_recv_next(ce_state,
&transfer_context,
&ce_data, &nbytes,
&transfer_id,
&flags) == 0);
}
ath10k_pci_process_ce(ar);
}
......@@ -728,13 +702,10 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
struct ce_sendlist sendlist;
unsigned int len;
u32 flags = 0;
int ret;
memset(&sendlist, 0, sizeof(struct ce_sendlist));
len = min(bytes, nbuf->len);
bytes -= len;
......@@ -749,8 +720,6 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
/* Make sure we have resources to handle this request */
spin_lock_bh(&pipe_info->pipe_lock);
if (!pipe_info->num_sends_allowed) {
......@@ -761,7 +730,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
pipe_info->num_sends_allowed--;
spin_unlock_bh(&pipe_info->pipe_lock);
ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
skb_cb->paddr, len, flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
......@@ -1316,15 +1286,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
if (netbuf != CE_SENDLIST_ITEM_CTXT)
/*
* Indicate the completion to the higher layer to free
* the buffer
*/
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
/*
* Indicate the completion to the higher layer to free
* the buffer
*/
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
}
}
......@@ -1490,13 +1459,16 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
unsigned int transfer_id)
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
struct bmi_xfer *xfer = transfer_context;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
&nbytes, &transfer_id))
return;
if (xfer->wait_for_resp)
return;
......@@ -1504,14 +1476,17 @@ static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
complete(&xfer->done);
}
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct bmi_xfer *xfer = transfer_context;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
&nbytes, &transfer_id, &flags))
return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
......@@ -2374,10 +2349,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
......@@ -2503,6 +2478,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
......
......@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
* b) HW crypto parameter
* - 0 bytes for no security
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
* c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU, only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
RX_MSDU_DECAP_NATIVE_WIFI = 1,
RX_MSDU_DECAP_RAW = 0,
/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
* htt_rx_desc contains the original decapped 802.11 header. */
RX_MSDU_DECAP_NATIVE_WIFI = 1,
/* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
* total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
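The layout comment above translates directly into offset arithmetic. A hedged sketch of locating the LLC/SNAP header for a non-A-MSDU frame; the cipher enum and helpers here are illustrative, not the driver's API:

#define ALIGN4(x) (((x) + 3) & ~3u)

enum demo_cipher { DEMO_NONE, DEMO_WEP, DEMO_TKIP, DEMO_AES };

static unsigned int crypto_param_len(enum demo_cipher c)
{
	switch (c) {
	case DEMO_NONE:	return 0;	/* no security */
	case DEMO_WEP:	return 4;
	case DEMO_TKIP:
	case DEMO_AES:	return 8;
	}
	return 0;
}

/* offset of (d) LLC/SNAP within rx_hdr_status, non-A-MSDU case */
static unsigned int llc_snap_offset(unsigned int dot11_hdr_len,
				    enum demo_cipher c)
{
	unsigned int off = ALIGN4(dot11_hdr_len);	/* (a) + padding */

	off += ALIGN4(crypto_param_len(c));		/* (b) + padding */
	return off;
}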
......
......@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(int id, void *buf, size_t buf_len),
TP_PROTO(int id, void *buf, size_t buf_len, int ret),
TP_ARGS(id, buf, buf_len),
TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
__field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
__entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"id %d len %zu",
"id %d len %zu ret %d",
__entry->id,
__entry->buf_len
__entry->buf_len,
__entry->ret
)
);
......@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
TRACE_EVENT(ath10k_htt_stats,
TP_PROTO(void *buf, size_t buf_len),
TP_ARGS(buf, buf_len),
TP_STRUCT__entry(
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"len %zu",
__entry->buf_len
)
);
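A TRACE_EVENT(name, ...) definition expands into an inline trace_name() that the driver calls at the instrumented spot. The placement below is an assumption for illustration; the stats rx path itself is not part of this hunk:

/* somewhere in the HTT stats rx handler (assumed placement): */
trace_ath10k_htt_stats(skb->data, skb->len);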
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
......
......@@ -44,40 +44,39 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
int ret;
if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
return;
ATH10K_SKB_CB(txdesc)->htt.refcount--;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
if (txfrag) {
ret = ath10k_skb_unmap(dev, txfrag);
if (ret)
ath10k_warn("txfrag unmap failed (%d)\n", ret);
dev_kfree_skb_any(txfrag);
}
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
if (skb_cb->htt.frag_len)
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (ATH10K_SKB_CB(txdesc)->htt.discard) {
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
......@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
......@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
dev_kfree_skb_any(txdesc);
}
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct sk_buff *txdesc;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
txdesc = htt->pending_tx[tx_done->msdu_id];
ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
ath10k_txrx_tx_unref(htt, txdesc);
}
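With completions now keyed by msdu_id straight into htt->pending_tx[], the id itself comes from a bitmap guarded by tx_lock. A hedged sketch of what such an allocator looks like, close to but not quoted from the driver's helper:

static int msdu_id_alloc(struct ath10k_htt *htt)
{
	int id;

	lockdep_assert_held(&htt->tx_lock);

	id = find_first_zero_bit(htt->used_msdu_ids,
				 htt->max_num_pending_tx);
	if (id == htt->max_num_pending_tx)
		return -ENOBUFS;	/* table full */

	__set_bit(id, htt->used_msdu_ids);
	return id;
}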
static const u8 rx_legacy_rate_idx[] = {
......@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
......
......@@ -19,9 +19,8 @@
#include "htt.h"
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
......
......@@ -23,30 +23,6 @@
#include "wmi.h"
#include "mac.h"
void ath10k_wmi_flush_tx(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED) {
ath10k_warn("wmi flush skipped - device is wedged anyway\n");
return;
}
ret = wait_event_timeout(ar->wmi.wq,
atomic_read(&ar->wmi.pending_tx_count) == 0,
5*HZ);
if (atomic_read(&ar->wmi.pending_tx_count) == 0)
return;
if (ret == 0)
ret = -ETIMEDOUT;
if (ret < 0)
ath10k_warn("wmi flush failed (%d)\n", ret);
}
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
......@@ -85,18 +61,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
wake_up(&ar->wmi.wq);
}
/* WMI command API */
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
int status;
int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
......@@ -107,26 +79,87 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
WMI_MAX_PENDING_TX_COUNT) {
/* avoid using up memory when FW hangs */
dev_kfree_skb(skb);
atomic_dec(&ar->wmi.pending_tx_count);
return -EBUSY;
}
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
if (ret)
goto err_pull;
status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
if (status) {
return 0;
err_pull:
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
return ret;
}
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
struct wmi_bcn_tx_arg arg = {0};
int ret;
lockdep_assert_held(&arvif->ar->data_lock);
if (arvif->beacon == NULL)
return;
arg.vdev_id = arvif->vdev_id;
arg.tx_rate = 0;
arg.tx_power = 0;
arg.bcn = arvif->beacon->data;
arg.bcn_len = arvif->beacon->len;
ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
if (ret)
return;
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
ath10k_wmi_tx_beacon_nowait(arvif);
}
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_wmi_tx_beacons_iter,
NULL);
spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
wake_up(&ar->wmi.tx_credits_wq);
}
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
{
int ret = -EINVAL;
wait_event_timeout(ar->wmi.tx_credits_wq, ({
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
(ret != -EAGAIN);
}), 3*HZ);
if (ret)
dev_kfree_skb_any(skb);
atomic_dec(&ar->wmi.pending_tx_count);
return status;
}
return 0;
return ret;
}
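The ({ ... }) passed to wait_event_timeout() above is a GCC statement expression: it re-runs on every tx-credit wakeup and its final expression is the wake condition, so the send is retried until it stops returning -EAGAIN. A stripped-down sketch of the shape, with try_send() as a stand-in for the nowait send:

static int try_send(void);	/* stand-in for ath10k_wmi_cmd_send_nowait() */

static int send_with_credit_wait(struct ath10k *ar)
{
	int ret = -EINVAL;

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		ret = try_send();	/* re-attempted on each wakeup */
		(ret != -EAGAIN);	/* stop once it is a final result */
	}), 3*HZ);

	return ret;
}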
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
......@@ -748,10 +781,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
......@@ -808,17 +839,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
arg.vdev_id = arvif->vdev_id;
arg.tx_rate = 0;
arg.tx_power = 0;
arg.bcn = bcn->data;
arg.bcn_len = bcn->len;
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
ath10k_warn("SWBA overrun on vdev %d\n",
arvif->vdev_id);
dev_kfree_skb_any(arvif->beacon);
}
ret = ath10k_wmi_beacon_send(ar, &arg);
if (ret)
ath10k_warn("could not send beacon (%d)\n", ret);
arvif->beacon = bcn;
dev_kfree_skb_any(bcn);
ath10k_wmi_tx_beacon_nowait(arvif);
spin_unlock_bh(&ar->data_lock);
}
}
......@@ -1024,7 +1055,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
......@@ -1143,64 +1174,18 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
static void ath10k_wmi_event_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k,
wmi.wmi_event_work);
struct sk_buff *skb;
for (;;) {
skb = skb_dequeue(&ar->wmi.wmi_event_list);
if (!skb)
break;
ath10k_wmi_event_process(ar, skb);
}
}
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
enum wmi_event_id event_id;
event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
/* some events must be handled ASAP
* and thus can't be deferred to a worker thread */
switch (event_id) {
case WMI_HOST_SWBA_EVENTID:
case WMI_MGMT_RX_EVENTID:
ath10k_wmi_event_process(ar, skb);
return;
default:
break;
}
skb_queue_tail(&ar->wmi.wmi_event_list, skb);
queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.wq);
skb_queue_head_init(&ar->wmi.wmi_event_list);
INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
/* HTC should've drained the packets already */
if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
ath10k_warn("there are still pending packets\n");
cancel_work_sync(&ar->wmi.wmi_event_work);
skb_queue_purge(&ar->wmi.wmi_event_list);
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
......@@ -1215,6 +1200,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
......@@ -2125,7 +2111,8 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
......@@ -2141,7 +2128,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
......
......@@ -508,6 +508,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
{
switch (mode) {
case MODE_11A:
return "11a";
case MODE_11G:
return "11g";
case MODE_11B:
return "11b";
case MODE_11GONLY:
return "11gonly";
case MODE_11NA_HT20:
return "11na-ht20";
case MODE_11NG_HT20:
return "11ng-ht20";
case MODE_11NA_HT40:
return "11na-ht40";
case MODE_11NG_HT40:
return "11ng-ht40";
case MODE_11AC_VHT20:
return "11ac-vht20";
case MODE_11AC_VHT40:
return "11ac-vht40";
case MODE_11AC_VHT80:
return "11ac-vht80";
case MODE_11AC_VHT20_2G:
return "11ac-vht20-2g";
case MODE_11AC_VHT40_2G:
return "11ac-vht40-2g";
case MODE_11AC_VHT80_2G:
return "11ac-vht80-2g";
case MODE_UNKNOWN:
/* skip */
break;
/* no default handler to allow compiler to check that the
* enum is fully handled */
};
return "<unknown>";
}
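Leaving out the default case is deliberate: with -Wswitch (part of -Wall in GCC, on by default in clang), adding a value to enum wmi_phy_mode without handling it here draws a compiler warning. A minimal illustration:

enum demo { DEMO_A, DEMO_B, DEMO_C };

static const char *demo_str(enum demo d)
{
	switch (d) {
	case DEMO_A: return "a";
	case DEMO_B: return "b";
	/* DEMO_C unhandled: the compiler warns
	 * "enumeration value 'DEMO_C' not handled in switch" */
	}
	return "<unknown>";
}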
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
......@@ -763,14 +805,6 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
/*
* status consists of the upper 16 bits of init status and the lower 16
* bits of the module ID that returned the status
*/
#define WLAN_INIT_STATUS_SUCCESS 0x0
#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
......@@ -3010,7 +3044,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
......@@ -3023,7 +3056,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
......@@ -3076,7 +3108,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
......