Commit 9b5186b4 authored by John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath

Kalle Valo <kvalo@qca.qualcomm.com> says:

"One ath6kl patch and rest for ath10k, but nothing really major which
stands out. Most notable:

o fix resume (Bartosz)

o firmware restart is now faster and more reliable (Michal)

o it's now possible to test hardware restart functionality without
  crashing the firmware using hw-restart parameter with
  simulate_fw_crash debugfs file (Michal)"
Signed-off-by: John W. Linville <linville@tuxdriver.com>
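For reference, the new "hw-restart" keyword (like the existing "soft", "hard" and "assert" keywords) is exercised from user space by writing it to ath10k's simulate_fw_crash debugfs file. A minimal user-space sketch follows; the debugfs mount point and the phy0 name are assumptions and vary per system:

/* Sketch: request a simulated hw restart via ath10k's debugfs file.
 * The path below assumes debugfs is mounted at /sys/kernel/debug and the
 * device is phy0; adjust to match the local system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
	const char *cmd = "hw-restart";	/* or "soft", "hard", "assert" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}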
@@ -558,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
/* sanity */
dest_ring->per_transfer_context[sw_index] = NULL;
desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -835,8 +836,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
memset(src_ring->per_transfer_context, 0,
nentries * sizeof(*src_ring->per_transfer_context));
memset(src_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -872,8 +873,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
memset(dest_ring->per_transfer_context, 0,
nentries * sizeof(*dest_ring->per_transfer_context));
memset(dest_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -31,12 +31,17 @@
unsigned int ath10k_debug_mask;
static bool uart_print;
static unsigned int ath10k_p2p;
static bool skip_otp;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
module_param_named(p2p, ath10k_p2p, uint, 0644);
module_param(skip_otp, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -280,7 +285,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (result != 0) {
if (!skip_otp && result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -744,6 +749,25 @@ static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
/* Place a barrier to make sure the compiler doesn't reorder
* CRASH_FLUSH and calling other functions.
*/
barrier();
ieee80211_stop_queues(ar->hw);
ath10k_drain_tx(ar);
complete_all(&ar->scan.started);
complete_all(&ar->scan.completed);
complete_all(&ar->scan.on_channel);
complete_all(&ar->offchan_tx_completed);
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -781,6 +805,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
lockdep_assert_held(&ar->conf_mutex);
clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -1185,6 +1211,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
@@ -386,6 +386,11 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_CORE_REGISTERED,
/* Device has crashed and needs to restart. This indicates any pending
* waiters should immediately cancel instead of waiting for a time out.
*/
ATH10K_FLAG_CRASH_FLUSH,
};
enum ath10k_cal_mode {
@@ -695,7 +695,8 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
"To simulate firmware crash write one of the keywords to this file:\n"
"`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
"`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
"`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n";
"`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
"`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
@@ -748,6 +749,10 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
} else if (!strcmp(buf, "assert")) {
ath10k_info(ar, "simulating firmware assert crash\n");
ret = ath10k_debug_fw_assert(ar);
} else if (!strcmp(buf, "hw-restart")) {
ath10k_info(ar, "user requested hw restart\n");
queue_work(ar->workqueue, &ar->restart_work);
ret = 0;
} else {
ret = -EINVAL;
goto exit;
@@ -291,8 +291,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
trace_ath10k_htt_rx_pop_msdu(ar, msdu->data, msdu->len +
skb_tailroom(msdu));
dma_unmap_single(htt->ar->dev,
ATH10K_SKB_CB(msdu)->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
return msdu;
}
@@ -319,7 +323,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu, *next;
struct htt_rx_desc *rx_desc;
u32 tsf;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -332,14 +335,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
while (msdu) {
int last_msdu, msdu_len_invalid, msdu_chained;
dma_unmap_single(htt->ar->dev,
ATH10K_SKB_CB(msdu)->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
/* FIXME: we must report msdu payload since this is what caller
@@ -430,14 +425,14 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
while (msdu_chained--) {
struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
dma_unmap_single(htt->ar->dev,
ATH10K_SKB_CB(next)->paddr,
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"htt rx chained: ", next->data,
next->len + skb_tailroom(next));
if (!next) {
ath10k_warn(ar, "failed to pop chained msdu\n");
ath10k_htt_rx_free_msdu_chain(*head_msdu);
*head_msdu = NULL;
msdu = NULL;
htt->rx_confused = true;
break;
}
skb_trim(next, 0);
skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
@@ -451,8 +446,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
tsf = __le32_to_cpu(rx_desc->ppdu_end.tsf_timestamp);
trace_ath10k_htt_rx_desc(ar, tsf, &rx_desc->attention,
trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
sizeof(*rx_desc) - sizeof(u32));
if (last_msdu) {
msdu->next = NULL;
@@ -499,6 +493,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
htt->rx_confused = false;
htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -588,41 +584,47 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
return 4;
return IEEE80211_WEP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return 8;
case HTT_RX_MPDU_ENCRYPT_NONE:
return 0;
return IEEE80211_CCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unknown encryption type %d\n", type);
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
#define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
return 0;
return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return 4;
return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return 8;
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unknown encryption type %d\n", type);
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
@@ -899,6 +901,8 @@ static void ath10k_process_rx(struct ath10k *ar,
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
}
@@ -1176,7 +1180,6 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
struct sk_buff *head,
enum htt_rx_mpdu_status status,
bool channel_set,
u32 attention)
{
@@ -1200,22 +1203,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
if (attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx CAC running\n");
@@ -1231,8 +1223,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr;
int num_mpdu_ranges;
u32 attention;
@@ -1280,8 +1270,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) {
status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
@@ -1302,12 +1290,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
rxd = container_of((void *)msdu_head->data,
struct htt_rx_desc,
msdu_payload);
if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
status,
channel_set,
attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
@@ -1372,6 +1355,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
&attention);
spin_unlock_bh(&htt->rx_ring.lock);
tasklet_schedule(&htt->rx_replenish_task);
ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
@@ -1433,7 +1418,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8;
trim += MICHAEL_MIC_LEN;
if (trim > msdu_head->len) {
ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
@@ -92,7 +92,6 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
@@ -564,7 +563,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_htt_tx_msdu(ar, msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
@@ -519,6 +519,9 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
return -ESHUTDOWN;
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
if (ret == 0)
@@ -551,6 +554,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.max_reg_power = channel->max_reg_power * 2;
arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
reinit_completion(&ar->vdev_setup_done);
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
@@ -598,6 +603,8 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
reinit_completion(&ar->vdev_setup_done);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
@@ -2350,7 +2357,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
}
/* Must not be called with conf_mutex held as workers can use that also. */
static void ath10k_drain_tx(struct ath10k *ar)
void ath10k_drain_tx(struct ath10k *ar)
{
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
@@ -3307,9 +3314,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
}
static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
@@ -3826,10 +3834,11 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
return 0;
}
@@ -3872,7 +3881,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
arvif->vdev_id, value);
ret = ath10k_mac_set_rts(arvif, value);
ret = ath10k_mac_set_frag(arvif, value);
if (ret) {
ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3908,7 +3917,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
skip = (ar->state == ATH10K_STATE_WEDGED) ||
test_bit(ATH10K_FLAG_CRASH_FLUSH,
&ar->dev_flags);
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
@@ -4009,6 +4020,7 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
ieee80211_wake_queues(ar->hw);
}
mutex_unlock(&ar->conf_mutex);
@@ -4044,6 +4056,9 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel = &sband->channels[idx];
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -4917,6 +4932,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@@ -40,6 +40,7 @@ void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
@@ -1196,64 +1196,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;
buf_sz = pipe_info->buf_sz;
ar = pci_pipe->hif_ce_state;
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->dest_ring;
/* Unused Copy Engine */
if (buf_sz == 0)
if (!ce_ring)
return;
ar = pipe_info->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_hdl = pipe_info->ce_hdl;
if (!pci_pipe->buf_sz)
return;
while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
&ce_data) == 0) {
dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
netbuf->len + skb_tailroom(netbuf),
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i];
if (!skb)
continue;
ce_ring->per_transfer_context[i] = NULL;
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(netbuf);
dev_kfree_skb_any(skb);
}
}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct ce_desc *ce_desc;
struct sk_buff *skb;
unsigned int id;
u32 buf_sz;
int i;
buf_sz = pipe_info->buf_sz;
ar = pci_pipe->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
/* Unused Copy Engine */
if (buf_sz == 0)
if (!ce_ring)
return;
ar = pipe_info->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_hdl = pipe_info->ce_hdl;
if (!pci_pipe->buf_sz)
return;
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
/* no need to call tx completion for NULL pointers */
if (!netbuf)
ce_desc = ce_ring->shadow_base;
if (WARN_ON(!ce_desc))
return;
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i];
if (!skb)
continue;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
ce_ring->per_transfer_context[i] = NULL;
id = MS(__le16_to_cpu(ce_desc[i].flags),
CE_DESC_FLAGS_META_DATA);
ar_pci->msg_callbacks_current.tx_completion(ar, skb, id);
}
}
@@ -1432,6 +1442,9 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
&nbytes, &transfer_id, &flags))
return;
if (WARN_ON_ONCE(!xfer))
return;
if (!xfer->wait_for_resp) {
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
return;
@@ -1707,99 +1720,167 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
msleep(10);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
spin_lock_bh(&ar->data_lock);
ar->stats.fw_warm_reset_counter++;
spin_unlock_bh(&ar->data_lock);
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
val);
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
/* disable pending irqs */
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS, 0);
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
u32 val;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CLR_ADDRESS, ~0);
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
msleep(100);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val | SOC_RESET_CONTROL_CE_RST_MASK);
msleep(10);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
/* clear fw indicator */
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
u32 val;
/* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS,
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
/* reset CE */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val | SOC_RESET_CONTROL_CE_RST_MASK);
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
msleep(10);
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
int ret;
/* unreset CE */
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val & ~SOC_RESET_CONTROL_CE_RST_MASK);
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
msleep(10);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
spin_lock_bh(&ar->data_lock);
ar->stats.fw_warm_reset_counter++;
spin_unlock_bh(&ar->data_lock);
ath10k_pci_irq_disable(ar);
/* Make sure the target CPU is not doing anything dangerous, e.g. if it
* were to access copy engine while host performs copy engine reset
* then it is possible for the device to confuse pci-e controller to
* the point of bringing host system to a complete stop (i.e. hang).
*/
ath10k_pci_warm_reset_si0(ar);
ath10k_pci_warm_reset_cpu(ar);
ath10k_pci_init_pipes(ar);
ath10k_pci_wait_for_target_init(ar);
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
val);
ath10k_pci_warm_reset_clear_lf(ar);
ath10k_pci_warm_reset_ce(ar);
ath10k_pci_warm_reset_cpu(ar);
ath10k_pci_init_pipes(ar);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
return ret;
}
/* CPU warm reset */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
val);
return 0;
}
msleep(100);
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
int i, ret;
u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
* It is thus preferred to use warm reset which is safer but may not be
* able to recover the device from all possible fail scenarios.
*
* Warm reset doesn't always work on first try so attempt it a few
* times before giving up.
*/
for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
ret = ath10k_pci_warm_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
ret);
continue;
}
/* FIXME: Sometimes copy engine doesn't recover after warm
* reset. In most cases this needs cold reset. In some of these
* cases the device is in such a state that a cold reset may
* lock up the host.
*
* Reading any host interest register via copy engine is
* sufficient to verify if device is capable of booting
* firmware blob.
*/
ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_warn(ar, "failed to init copy engine: %d\n",
ret);
continue;
}
ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
&val);
if (ret) {
ath10k_warn(ar, "failed to poke copy engine: %d\n",
ret);
continue;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
return 0;
}
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
ath10k_warn(ar, "refusing cold reset as requested\n");
return -EPERM;
}
ret = ath10k_pci_cold_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
return ret;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
return 0;
}
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake up target: %d\n", ret);
return ret;
}
/*
* Bring the target up cleanly.
*
@@ -1810,26 +1891,16 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
if (cold_reset)
ret = ath10k_pci_cold_reset(ar);
else
ret = ath10k_pci_warm_reset(ar);
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset target: %d\n", ret);
goto err;
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_sleep;
}
ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
goto err;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
goto err_ce;
goto err_sleep;
}
ret = ath10k_pci_init_config(ar);
@@ -1848,73 +1919,21 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
err_ce:
ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
err:
return ret;
}
static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
{
int i, ret;
/*
* Sometime warm reset succeeds after retries.
*
* FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
* at first try.
*/
for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
ret = __ath10k_pci_hif_power_up(ar, false);
if (ret == 0)
break;
ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
}
err_sleep:
ath10k_pci_sleep(ar);
return ret;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a
* warm reset.
*
* Warm reset doesn't always work though so fall back to cold reset may
* be necessary.
*/
ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
ret);
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
return ret;
ath10k_warn(ar, "trying cold reset\n");
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
ret);
return ret;
}
}
return 0;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
ath10k_pci_warm_reset(ar);
/* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here.
*/
ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
@@ -2516,6 +2535,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2567,7 +2588,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
ath10k_pci_sleep(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
......
......@@ -20,6 +20,13 @@
#include <linux/tracepoint.h>
#include "core.h"
#if !defined(_TRACE_H_)
static inline u32 ath10k_frm_hdr_len(void *buf)
{
return ieee80211_hdrlen(((struct ieee80211_hdr *)buf)->frame_control);
}
#endif
#define _TRACE_H_
/* create empty functions when tracing is disabled */
@@ -281,36 +288,6 @@ TRACE_EVENT(ath10k_htt_pktlog,
)
);
TRACE_EVENT(ath10k_htt_rx_desc,
TP_PROTO(struct ath10k *ar, u32 tsf, void *rxdesc, u16 len),
TP_ARGS(ar, tsf, rxdesc, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u32, tsf)
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->tsf = tsf;
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), rxdesc, len);
),
TP_printk(
"%s %s %u len %hu",
__get_str(driver),
__get_str(device),
__entry->tsf,
__entry->len
)
);
TRACE_EVENT(ath10k_htt_tx,
TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
u8 vdev_id, u8 tid),
@@ -371,7 +348,7 @@ TRACE_EVENT(ath10k_txrx_tx_unref,
)
);
DECLARE_EVENT_CLASS(ath10k_data_event,
DECLARE_EVENT_CLASS(ath10k_hdr_event,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len),
@@ -380,14 +357,14 @@ DECLARE_EVENT_CLASS(ath10k_data_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
__dynamic_array(u8, data, len)
__dynamic_array(u8, data, ath10k_frm_hdr_len(data))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
__entry->len = ath10k_frm_hdr_len(data);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
TP_printk(
@@ -398,25 +375,81 @@ DECLARE_EVENT_CLASS(ath10k_data_event,
)
);
DEFINE_EVENT(ath10k_data_event, ath10k_htt_tx_msdu,
DECLARE_EVENT_CLASS(ath10k_payload_event,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len - ath10k_frm_hdr_len(data);
memcpy(__get_dynamic_array(payload),
data + ath10k_frm_hdr_len(data), __entry->len);
),
TP_printk(
"%s %s len %zu\n",
__get_str(driver),
__get_str(device),
__entry->len
)
);
DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_htt_rx_pop_msdu,
DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_wmi_mgmt_tx,
DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_wmi_bcn_tx,
DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
TRACE_EVENT(ath10k_htt_rx_desc,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
"%s %s rxdesc len %d",
__get_str(driver),
__get_str(device),
__entry->len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
@@ -146,7 +146,8 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
mapped == expect_mapped;
(mapped == expect_mapped ||
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
if (ret <= 0)
@@ -779,6 +779,10 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ath10k_wmi_tx_beacons_nowait(ar);
ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
ret = -ESHUTDOWN;
(ret != -EAGAIN);
}), 3*HZ);
@@ -834,7 +838,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_wmi_mgmt_tx(ar, skb->data, skb->len);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
trace_ath10k_tx_payload(ar, skb->data, skb->len);
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
@@ -1893,7 +1898,9 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
arvif->beacon = bcn;
arvif->beacon_sent = false;
trace_ath10k_wmi_bcn_tx(ar, bcn->data, bcn->len);
trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
@@ -4187,9 +4194,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
else
ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
else
ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
} else {
ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
}
@@ -4398,7 +4405,6 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
@@ -1193,18 +1193,10 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
return 0;
}
static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
{
if (usb_get_intfdata(intf))
ath6kl_usb_remove(intf);
return 0;
}
#else
#define ath6kl_usb_pm_suspend NULL
#define ath6kl_usb_pm_resume NULL
#define ath6kl_usb_pm_reset_resume NULL
#endif
@@ -1222,7 +1214,6 @@ static struct usb_driver ath6kl_usb_driver = {
.probe = ath6kl_usb_probe,
.suspend = ath6kl_usb_pm_suspend,
.resume = ath6kl_usb_pm_resume,
.reset_resume = ath6kl_usb_pm_reset_resume,
.disconnect = ath6kl_usb_remove,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,