tx.c
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

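/*
 * Reserve a free entry in the tx_frames table for this skb and return
 * its index, or -EBUSY when all tx descriptors are in use.
 */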
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps, single_sta;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_sta = (wl->active_sta_count == 1);

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

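/*
 * Map an skb to the host link id (HLID) it should be transmitted on:
 * dummy packets and frames without a vif use the system link, AP vifs
 * are resolved per destination station, off-channel frames use the
 * device link, and everything else uses the station link.
 */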
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
		return wl->system_hlid;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}

unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

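/*
 * Reserve a tx descriptor id and enough HW memory blocks for this skb.
 * Returns 0 on success, -EAGAIN if the frame does not fit in the
 * aggregation buffer, and -EBUSY if no free descriptor or not enough
 * firmware blocks are available.
 */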
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

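/*
 * Fill in the hardware tx descriptor reserved by wl1271_tx_allocate:
 * packet lifetime, queue/TID, session id, rate policy and encryption
 * attributes.
 */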
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

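/*
 * Prepare a single frame for transmission: allocate a descriptor and HW
 * blocks, fill the HW header and copy the padded frame into the
 * aggregation buffer. Returns the aligned length consumed in the buffer
 * or a negative error.
 */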
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

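/*
 * Translate a mac80211 rate_set bitmap for the given band into the
 * firmware's CONF_HW_BIT_RATE_* mask, including the MCS rates that
 * start at HW_HT_RATES_OFFSET.
 */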
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rates indication are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (wlcore_is_queue_stopped_by_reason(wl, i,
			WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			wlcore_wake_queue(wl, i,
					  WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}

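/*
 * Pop one skb from the given link/AC queue and update the global and
 * per-vif queue counters under the wl_lock spinlock.
 */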
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

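/*
 * A link is served as high priority while the number of packets it
 * already has allocated in the firmware is below its configured fast or
 * slow link threshold.
 */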
static bool wlcore_lnk_high_prio(struct wl1271 *wl, u8 hlid,
				 struct wl1271_link *lnk)
{
	u8 thold;

	if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map))
		thold = wl->conf.tx.fast_link_thold;
	else
		thold = wl->conf.tx.slow_link_thold;

	return lnk->allocated_pkts < thold;
}

static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}

static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}

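/*
 * Dequeue the next skb for transmission: pick an AC via
 * wlcore_select_ac(), round-robin over vifs and their links trying
 * high-priority links first, then the system link, then a low-priority
 * link, and finally the pending dummy packet.
 */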
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;

	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

out:
	return skb;
}

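/*
 * Return an skb to the head of its link queue (or re-mark the dummy
 * packet as pending) and restore the queue counters, so it is retried
 * on the next pass.
 */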
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found  = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
 * within prepare_tx_frame code but there's nothing we should do about those
 * either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					     buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					     wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

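/*
 * Handle a single entry of the firmware tx result queue: update the
 * mac80211 tx status, track the security sequence number, strip the
 * private headers and hand the skb back to the stack.
 */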
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result =  &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);

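/*
 * Flush all queued skbs of a single link back to mac80211 with an empty
 * tx status, and fix up the global and per-vif queue counters.
 */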
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wlvif->sta.ba_rx_bitmap = 0;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);

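/*
 * Queue stop/wake helpers: each mac80211 queue tracks a bitmask of stop
 * reasons, and a queue is only woken again once all of its stop reasons
 * have been cleared.
 */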
void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
			      enum wlcore_queue_stop_reason reason)
{
	bool stopped = !!wl->queue_stop_reasons[queue];

	/* queue should not be stopped for this reason */
	WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
}

void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));

	if (wl->queue_stop_reasons[queue])
		goto out;

	ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_stop_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_stop_queues);

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_wake_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_wake_queues);

void wlcore_reset_stopped_queues(struct wl1271 *wl)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (!wl->queue_stop_reasons[i])
			continue;

		wl->queue_stop_reasons[i] = 0;
		ieee80211_wake_queue(wl->hw,
				     wl1271_tx_get_mac80211_queue(i));
	}

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
			     enum wlcore_queue_stop_reason reason)
{
	return test_bit(reason, &wl->queue_stop_reasons[queue]);
}

bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
{
	return !!wl->queue_stop_reasons[queue];
}