/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

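/* Wake up the waiter blocking on ar->offchan_tx_completed once the
 * off-channel frame it queued has been transmitted.
 */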
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb. */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn(ar, "completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}

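/* Handle a single HTT tx completion: release the msdu_id, unmap the frame
 * and report the tx status back to mac80211.
 */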
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			  const struct htt_tx_done *tx_done)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *msdu;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	bool limit_mgmt_desc = false;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
		   tx_done->msdu_id, !!tx_done->discard,
		   !!tx_done->no_ack, !!tx_done->success);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
	if (!msdu) {
		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
			    tx_done->msdu_id);
		spin_unlock_bh(&htt->tx_lock);
		return;
	}

	hdr = (struct ieee80211_hdr *)msdu->data;
	fc = hdr->frame_control;

	if (unlikely(ieee80211_is_mgmt(fc)) &&
	    ar->hw_params.max_probe_resp_desc_thres)
		limit_mgmt_desc = true;

	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
	spin_unlock_bh(&htt->tx_lock);

	skb_cb = ATH10K_SKB_CB(msdu);
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

	if (tx_done->discard) {
		ieee80211_free_txskb(htt->ar->hw, msdu);
		return;
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */
}

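/* Find the peer entry matching both vdev_id and MAC address, if any. */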
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}

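/* Find the peer entry that has been assigned the given firmware peer id. */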
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}

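/* Wait up to three seconds for the peer to reach the expected mapping
 * state. Gives up early if a firmware crash is being flushed.
 */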
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	long time_left;

	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			(mapped == expect_mapped ||
			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
		}), 3*HZ);

	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

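/* Handle an HTT peer map event: allocate the peer entry on first use and
 * record the firmware-assigned peer id.
 */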
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		ether_addr_copy(peer->addr, ev->addr);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}

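/* Handle an HTT peer unmap event: drop the peer id and free the peer entry
 * once no ids reference it anymore.
 */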
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
			    ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}