xmit.c 68.3 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
32 33
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
34 35 36 37
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
static u16 bits_per_symbol[][2] = {
39 40 41 42 43 44 45 46 47 48 49 50 51
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
52
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
53 54 55
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
56
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
57
				struct ath_txq *txq, struct list_head *bf_q,
58
				struct ath_tx_status *ts, int txok);
59
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
60
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
61 62
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
63
			     int txok);
64 65
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
66 67 68
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
69
					   struct sk_buff *skb);
70

71
/*
 * Index into sc->tx.max_aggr_framelen[queue][]: one slot per HT channel
 * width / guard interval combination.  The SGI variant must directly
 * follow the full-GI entry, since ath_lookup_rate() selects it by doing
 * "modeidx++" when IEEE80211_TX_RC_SHORT_GI is set.
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
78 79 80
/*********************/
/* Aggregation logic */
/*********************/
81

82
/* Acquire the TX queue lock (BH-disabling; __acquires is a sparse annotation). */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

88
/* Release the TX queue lock without processing the completion queue. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

94
/*
 * Release the TX queue lock and report all queued-up completed frames to
 * mac80211.  The skbs are first spliced onto a private list so that
 * ieee80211_tx_status() (which may re-enter the driver) runs without
 * txq->axq_lock held.
 */
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
108
/*
 * Queue a TID for transmission scheduling on @txq.  The TID is appended
 * to its access category's list, and the AC in turn to the queue's AC
 * list; both insertions are skipped if already pending (sched flags) or
 * if the TID is paused.
 */
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/* paused TIDs must not be scheduled until resumed */
	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
127

128
/*
 * Return the driver-private per-frame state, which is stored in the
 * rate_driver_data scratch area of the skb's mac80211 TX info.  The
 * BUILD_BUG_ON guarantees at compile time that it actually fits there.
 */
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

136 137 138 139 140 141
/* Ask mac80211 to send a BlockAckReq moving the receiver window to @seqno. */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

142 143 144 145 146 147 148
/* Fill bf->rates with the rate series mac80211 selected for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
/*
 * Account for a completed skb on its hardware queue: decrement the
 * pending-frame counter and wake the corresponding mac80211 queue if it
 * was stopped and the backlog has drained below the limit.  Frames sent
 * through the UAPSD queue are charged to their original mapped queue.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int q;

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	/* frames on a foreign (non-mapped) queue are not counted here */
	if (txq != sc->tx.txq_map[q])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
		ieee80211_wake_queue(sc->hw, q);
		txq->stopped = false;
	}
}

171 172 173 174 175 176 177 178 179 180 181 182 183 184
/*
 * Map an skb to the node's per-TID software state.  Non-QoS data frames
 * carry no QoS control field and fall back to TID 0.
 */
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 tid = 0;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_qos_ctl(hdr)[0] &
		      IEEE80211_QOS_CTL_TID_MASK;

	return ATH_AN_2_TID(an, tid);
}

185 186
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
187
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
188 189 190 191
}

/*
 * Pop the next frame for this TID.  Retransmissions (retry_q) are served
 * before fresh frames (buf_q); returns NULL when both queues are empty.
 */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *skb = __skb_dequeue(&tid->retry_q);

	return skb ? skb : __skb_dequeue(&tid->buf_q);
}

201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 *
 * Walks the TID's pending queue; frames without a descriptor yet get one
 * assigned via ath_tx_setup_buffer() (which also allocates the sequence
 * number), and frames for which that fails are dropped and accounted.
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* frame belonged to the previous (aggregation) session */
		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* no tx buffer available: drop the frame */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}

}

236
/*
 * Drop every frame on the TID's retry queue, completing each with a
 * zeroed (failed) TX status.  Frames that were inside the BlockAck
 * window are removed from it; if any were, a BAR is sent afterwards to
 * move the receiver's window forward.  Must be called with the txq lock
 * held; the lock is dropped around ath_send_bar().
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->retry_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!bf) {
			/* never got a descriptor: just free the skb */
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		if (fi->baw_tracked) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			sendbar = true;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	if (sendbar) {
		/* drop the queue lock: sending the BAR re-enters mac80211 */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
274

S
Sujith 已提交
275 276
/*
 * Mark @seqno as completed in the TID's BlockAck window bitmap and slide
 * the window start (seq_start/baw_head) forward past any leading run of
 * completed slots.  tid->bar_index tracks a pending BAR position and is
 * shifted along with the window.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* advance the window over contiguous completed entries */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
292

S
Sujith 已提交
293
/*
 * Add a frame's sequence number to the TID's BlockAck window bitmap and
 * flag it as tracked.  Extends baw_tail if the new entry lands at or
 * beyond the current tail.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);
	fi->baw_tracked = 1;

	/* new entry beyond the current tail: grow the window */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
/*
 * Drain every buffered frame for a TID, completing each with an error
 * status, and reset the BlockAck window state (seq_next, baw_tail,
 * bar_index) to a clean baseline.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = ath_tid_dequeue(tid))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			/* no descriptor was ever attached; report error */
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

S
Sujith 已提交
351
/*
 * Charge @count (re)transmission attempts to a frame.  On the first
 * retry the 802.11 Retry bit is set in the header, and since the frame
 * is already DMA-mapped, the modified header bytes must be synced back
 * to the device.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* Retry bit already set on an earlier attempt */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	/* make the header change visible to the DMA engine */
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

371
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
372
{
373
	struct ath_buf *bf = NULL;
S
Sujith 已提交
374 375

	spin_lock_bh(&sc->tx.txbuflock);
376 377

	if (unlikely(list_empty(&sc->tx.txbuf))) {
378 379 380
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
381 382 383 384

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
385 386
	spin_unlock_bh(&sc->tx.txbuflock);

387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
	return bf;
}

/* Return a buffer to the free TX-buffer pool (counterpart of ath_tx_get_buffer). */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Clone a TX buffer into a freshly allocated one: the new buffer shares
 * the original's skb, DMA address and state, and gets a copy of its
 * hardware descriptor.  Returns NULL (with a WARN) if the free pool is
 * empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

415 416 417 418
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
419
	struct ath_frame_info *fi;
420 421 422 423 424 425 426 427 428 429 430 431 432 433 434
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
435
		fi = get_frame_info(bf->bf_mpdu);
436
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
437 438 439 440 441 442 443 444 445 446

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
447 448
/*
 * Process the TX status of a (possibly aggregated) frame chain:
 * per-subframe, decide between completion (BA-acked, plain ack, or
 * final failure) and software retry, updating the TID's BlockAck
 * window accordingly.  Retried subframes are collected on a temporary
 * queue and spliced back onto the TID's retry queue to preserve
 * ordering.  May send a BAR (dropping the txq lock around it) and may
 * schedule a chip reset for the AR5416 deaf/mute BA condition.
 * Called with the txq lock held.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* total hw attempts: all earlier rate-series entries plus the
	 * long-retry count of the final one */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* station is gone: fail out every subframe */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			/* don't charge a retry while the peer sleeps and
			 * the attempt failed */
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* software retry limit exhausted: give up and
			 * remember to BAR past this sequence number */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			/* feed rate control exactly once per aggregate */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* BAR transmission re-enters mac80211: drop the txq lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
672

673 674 675 676 677 678 679 680 681 682
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Dispatch a completed TX buffer: update queue depth counters, then hand
 * the frame to the aggregate completion path or, for non-aggregates,
 * feed rate control (unless this is a flush) and complete it directly.
 * Kicks the queue scheduler afterwards except during a flush.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

709 710 711 712 713 714 715 716 717 718 719
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

720 721 722 723
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

724 725 726 727 728 729 730
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
731 732
/*
 * Compute the maximum aggregate length (in bytes) for a frame, based on
 * its rate series: the smallest precomputed 4ms/TXOP frame-length limit
 * among the MCS rates in the series, possibly reduced by the BTCOEX
 * limit and the peer's advertised max A-MPDU size.  Returns 0 (no
 * aggregation) for probe frames or if any legacy rate is present.
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		/* SGI entry directly follows the full-GI one (enum order) */
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
802

S
Sujith 已提交
803
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	/* per-symbol bit capacity at this MCS/width/stream count */
	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* pad short subframes up to the density-derived minimum length */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

877 878
/*
 * Peek the next transmittable subframe for a TID, preferring the retry
 * queue over fresh frames; *q is set to the queue the skb came from
 * (the skb itself is NOT unlinked here except when dropped).  Frames
 * without a descriptor get one assigned; frames that fail setup are
 * dropped.  Non-AMPDU frames are returned immediately; AMPDU frames
 * stop at the BlockAck window edge, and stale frames at or before a
 * pending BAR position are completed as failed and skipped.  Returns
 * NULL when nothing can be sent.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		/* serve retransmissions before fresh frames */
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf) {
			/* buffer setup failed: drop the frame */
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			/* a BAR already covers this seqno: fail it out */
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		return bf;
	}

	return NULL;
}

943 944 945 946 947
/*
 * Build one A-MPDU starting from @bf_first: pull subframes from the TID
 * until the rate-derived length limit, half the BAW, or an
 * unaggregatable frame stops it.  Linked subframes are moved onto
 * @bf_q, added to the BlockAck window, and given their delimiter
 * counts; *aggr_len receives the total (padded) aggregate length.
 * Returns true if the TID ran out of transmittable frames ("closed").
 */
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	bool closed = false;

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	do {
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
			    ath_lookup_legacy(bf) || nframes >= h_baw)
				break;

			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
				break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
	} while (ath_tid_has_buffered(tid));

	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	/* single-subframe "aggregate": send as plain A-MPDU, no BA needed */
	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

	*aggr_len = al;

	return closed;
#undef PADBYTES
}
1027

1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
/*
 * Compute the airtime (in microseconds) of an HT (MCS) frame.
 *
 * rix     - HT rate index (MCS)
 * pktlen  - total bytes (delims + data + fcs + pads + pad delims)
 * width   - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us short guard interval instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams = HT_RC_2_STREAMS(rix);

	/* number of data symbols: payload bits plus PLCP overhead,
	 * rounded up to whole OFDM symbols */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	duration = half_gi ? SYMBOL_TIME_HALFGI(nsymbols)
			   : SYMBOL_TIME(nsymbols);

	/* add the fixed legacy/HT training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
/*
 * Compute the maximum frame length (in bytes) that fits into an airtime
 * budget of "usec" microseconds at the given MCS/bandwidth/guard-interval.
 * The result is used to size A-MPDUs against the TXOP limit.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int streams = HT_RC_2_STREAMS(mcs);
	int symbols, bits;
	int bytes = 0;

	/* Remove the fixed preamble/training-field airtime (L-STF, L-LTF,
	 * L-SIG, HT-SIG, HT-STF, HT-LTF) from the time budget BEFORE
	 * converting it into data symbols.  The previous code subtracted
	 * these microsecond values from the byte count after conversion,
	 * mixing units (usec vs bytes) and overestimating the budget. */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
	bits -= OFDM_PLCP_BITS;
	bytes = bits / 8;
	/* hardware aggregate length limit (16-bit field, 4-byte aligned) */
	if (bytes > 65532)
		bytes = 65532;

	return bytes;
}

/*
 * Refresh the per-queue maximum A-MPDU frame length tables for all 32 MCS
 * indices, for each of the four (bandwidth, guard interval) combinations,
 * based on the queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *ht20, *ht20_sgi, *ht40, *ht40_sgi;
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];

	for (mcs = 0; mcs < 32; mcs++) {
		ht20[mcs]     = ath_max_framelen(txop, mcs, false, false);
		ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		ht40[mcs]     = ath_max_framelen(txop, mcs, true, false);
		ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1095
/*
 * Fill in the rate series of a tx descriptor from the mac80211 rate table
 * attached to the buffer: per-series tries, RTS/CTS protection flags,
 * bandwidth/SGI/STBC flags, chainmask and packet duration.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC is only valid for single-stream MCS */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1201

1202 1203 1204 1205 1206 1207 1208 1209
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1210

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1223 1224
}

1225 1226
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
1227 1228
{
	struct ath_hw *ah = sc->sc_ah;
1229
	struct ath_buf *bf_first = NULL;
1230
	struct ath_tx_info info;
S
Sujith Manoharan 已提交
1231 1232
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;
1233

1234 1235 1236 1237 1238 1239
	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

1240
	while (bf) {
1241
		struct sk_buff *skb = bf->bf_mpdu;
1242
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1243
		struct ath_frame_info *fi = get_frame_info(skb);
1244
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
1245 1246

		info.type = get_hw_packet_type(skb);
1247
		if (bf->bf_next)
1248
			info.link = bf->bf_next->bf_daddr;
1249
		else
1250 1251
			info.link = 0;

1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
		if (!bf_first) {
			bf_first = bf;

			info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

S
Sujith Manoharan 已提交
1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}
			ath_buf_set_rate(sc, bf, &info, len, rts);
1285 1286
		}

1287 1288
		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
1289 1290 1291 1292 1293
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
1294
			if (bf == bf_first)
1295
				info.aggr = AGGR_BUF_FIRST;
1296
			else if (bf == bf_first->bf_lastbf)
1297 1298 1299
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;
1300

1301 1302
			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
1303 1304
		}

1305 1306 1307
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

1308
		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
1309 1310 1311 1312
		bf = bf->bf_next;
	}
}

1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347
/*
 * Pull up to two consecutive non-aggregate frames off the tid queue and
 * chain them into a burst on bf_q.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0;

	for (;;) {
		struct ieee80211_tx_info *tx_info;

		nframes++;
		__skb_unlink(bf->bf_mpdu, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;
		bf_prev = bf;

		/* bursts are limited to two frames */
		if (nframes >= 2)
			break;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf)
			break;

		/* stop at the first frame that wants aggregation */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, bf);
	}
}

S
Sujith 已提交
1348 1349 1350
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
S
Sujith 已提交
1351
	struct ath_buf *bf;
1352
	struct ieee80211_tx_info *tx_info;
1353
	struct sk_buff_head *tid_q;
S
Sujith 已提交
1354
	struct list_head bf_q;
1355 1356
	int aggr_len = 0;
	bool aggr, last = true;
1357

S
Sujith 已提交
1358
	do {
1359
		if (!ath_tid_has_buffered(tid))
S
Sujith 已提交
1360
			return;
1361

S
Sujith 已提交
1362 1363
		INIT_LIST_HEAD(&bf_q);

1364 1365
		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf)
S
Sujith 已提交
1366
			break;
1367

1368
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382
		aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
		if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		    (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH))
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, bf);
		if (aggr)
			last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
						tid_q, &aggr_len);
		else
			ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

		if (list_empty(&bf_q))
			return;
1383

1384 1385
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
1386
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
S
Sujith 已提交
1387
		}
1388

1389
		ath_tx_fill_desc(sc, bf, txq, aggr_len);
1390
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1391
	} while (!last);
S
Sujith 已提交
1392 1393
}

1394 1395
/*
 * mac80211 ampdu_action(START) hook: prepare a tid for aggregation.
 * Returns 0 and reports the starting sequence number through *ssn.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	u8 density;

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the block-ack window tracking */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
1428

1429
/*
 * mac80211 ampdu_action(STOP) hook: tear down aggregation on a tid and
 * flush any frames still tracked in its block-ack window.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	txtid->active = false;
	txtid->paused = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);

	ath_txq_unlock_complete(sc, txq);
}
1442

1443 1444
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
1445 1446 1447 1448
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
1449
	bool buffered;
1450 1451 1452
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1453
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1454 1455 1456 1457 1458 1459 1460

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1461
		ath_txq_lock(sc, txq);
1462

1463
		buffered = ath_tid_has_buffered(tid);
1464 1465 1466 1467 1468 1469 1470 1471 1472

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

F
Felix Fietkau 已提交
1473
		ath_txq_unlock(sc, txq);
1474

1475 1476
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1487
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1488 1489 1490 1491

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1492
		ath_txq_lock(sc, txq);
1493 1494
		ac->clear_ps_filter = true;

1495
		if (!tid->paused && ath_tid_has_buffered(tid)) {
1496 1497 1498 1499
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

F
Felix Fietkau 已提交
1500
		ath_txq_unlock_complete(sc, txq);
1501 1502 1503
	}
}

1504 1505
/*
 * mac80211 ampdu_action(OPERATIONAL) hook: unpause a tid once the
 * block-ack session is fully established and resume transmission.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *tid = ATH_AN_2_TID(an, tidno);
	struct ath_txq *txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	tid->paused = false;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539
/*
 * mac80211 release_buffered_frames hook (U-APSD service period): pull up
 * to "nframes" frames off the requested tids, chain them, mark the last
 * one with EOSP and push the chain onto the dedicated uapsd queue.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);
		if (tid->paused)
			continue;

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			ath_tx_addto_baw(sc, tid, bf);
			bf->bf_state.bf_type &= ~BUF_AGGR;
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (!ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* flag the last released frame as end-of-service-period */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1593 1594 1595
/********************/
/* Queue Management */
/********************/
1596

S
Sujith 已提交
1597
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1598
{
1599
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1600
	struct ath9k_tx_queue_info qi;
1601
	static const int subtype_txq_to_hwq[] = {
1602 1603 1604 1605
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
1606
	};
1607
	int axq_qnum, i;
1608

S
Sujith 已提交
1609
	memset(&qi, 0, sizeof(qi));
1610
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
S
Sujith 已提交
1611 1612 1613 1614
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;
1615 1616

	/*
S
Sujith 已提交
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
1630
	 */
1631
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1632
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
1633 1634 1635 1636 1637 1638 1639
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
1640 1641
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
1642
		/*
S
Sujith 已提交
1643 1644
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
1645
		 */
S
Sujith 已提交
1646
		return NULL;
1647
	}
1648 1649
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];
1650

1651 1652
		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
S
Sujith 已提交
1653
		txq->axq_link = NULL;
F
Felix Fietkau 已提交
1654
		__skb_queue_head_init(&txq->complete_q);
S
Sujith 已提交
1655 1656 1657 1658
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
1659
		txq->axq_ampdu_depth = 0;
1660
		txq->axq_tx_inprogress = false;
1661
		sc->tx.txqsetup |= 1<<axq_qnum;
1662 1663 1664 1665

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
S
Sujith 已提交
1666
	}
1667
	return &sc->tx.txq[axq_qnum];
1668 1669
}

S
Sujith 已提交
1670 1671 1672
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
1673
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1674 1675 1676
	int error = 0;
	struct ath9k_tx_queue_info qi;

1677
	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
S
Sujith 已提交
1678 1679 1680 1681 1682 1683 1684 1685 1686

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1687 1688
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
S
Sujith 已提交
1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
1700
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
S
Sujith 已提交
1701
	int qnum = sc->beacon.cabq->axq_qnum;
1702

S
Sujith 已提交
1703
	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1704
	/*
S
Sujith 已提交
1705
	 * Ensure the readytime % is within the bounds.
1706
	 */
S
Sujith 已提交
1707 1708 1709 1710
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1711

1712
	qi.tqi_readyTime = (cur_conf->beacon_interval *
S
Sujith 已提交
1713
			    sc->config.cabqReadytime) / 100;
S
Sujith 已提交
1714 1715 1716
	ath_txq_update(sc, qnum, &qi);

	return 0;
1717 1718
}

1719
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1720
			       struct list_head *list)
1721
{
S
Sujith 已提交
1722 1723
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
1724 1725 1726
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
1727
	ts.ts_status = ATH9K_TX_FLUSH;
S
Sujith 已提交
1728
	INIT_LIST_HEAD(&bf_head);
1729

1730 1731
	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);
1732

1733 1734
		if (bf->bf_stale) {
			list_del(&bf->list);
1735

1736 1737
			ath_tx_return_buffer(sc, bf);
			continue;
S
Sujith 已提交
1738
		}
1739

S
Sujith 已提交
1740
		lastbf = bf->bf_lastbf;
1741
		list_cut_position(&bf_head, list, &lastbf->list);
1742
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
1743
	}
1744
}
1745

1746 1747 1748 1749 1750 1751
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
1752
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
1753
{
F
Felix Fietkau 已提交
1754 1755
	ath_txq_lock(sc, txq);

1756
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1757
		int idx = txq->txq_tailidx;
1758

1759
		while (!list_empty(&txq->txq_fifo[idx])) {
1760
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
1761 1762

			INCR(idx, ATH_TXFIFO_DEPTH);
1763
		}
1764
		txq->txq_tailidx = idx;
1765
	}
1766

1767 1768
	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
1769
	ath_drain_txq_list(sc, txq, &txq->axq_q);
1770

F
Felix Fietkau 已提交
1771
	ath_txq_unlock_complete(sc, txq);
1772 1773
}

1774
bool ath_drain_all_txq(struct ath_softc *sc)
1775
{
1776
	struct ath_hw *ah = sc->sc_ah;
1777
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
1778
	struct ath_txq *txq;
1779 1780
	int i;
	u32 npend = 0;
S
Sujith 已提交
1781

S
Sujith Manoharan 已提交
1782
	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1783
		return true;
S
Sujith 已提交
1784

1785
	ath9k_hw_abort_tx_dma(ah);
S
Sujith 已提交
1786

1787
	/* Check if any queue remains active */
S
Sujith 已提交
1788
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1789 1790 1791
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

1792 1793
		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
S
Sujith 已提交
1794 1795
	}

1796
	if (npend)
1797
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
S
Sujith 已提交
1798 1799

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1800 1801 1802 1803 1804 1805 1806 1807 1808 1809
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
1810
		ath_draintxq(sc, txq);
S
Sujith 已提交
1811
	}
1812 1813

	return !npend;
S
Sujith 已提交
1814
}
1815

S
Sujith 已提交
1816
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1817
{
S
Sujith 已提交
1818 1819
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1820
}
1821

1822 1823 1824
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
S
Sujith 已提交
1825 1826
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
1827 1828
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
1829

1830 1831
	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
	    list_empty(&txq->axq_acq) ||
1832
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
S
Sujith 已提交
1833
		return;
1834

1835 1836
	rcu_read_lock();

S
Sujith 已提交
1837
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1838
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1839

1840 1841 1842 1843
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;
1844

1845 1846 1847 1848 1849
		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;
1850

1851 1852
			if (tid->paused)
				continue;
1853

1854
			ath_tx_sched_aggr(sc, txq, tid);
1855

1856 1857 1858 1859
			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
1860
			if (ath_tid_has_buffered(tid))
1861
				ath_tx_queue_tid(txq, tid);
1862

1863 1864 1865 1866
			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}
1867

1868 1869 1870
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
1871
		}
1872 1873 1874

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1875
			break;
S
Sujith 已提交
1876
	}
1877 1878

	rcu_read_unlock();
S
Sujith 已提交
1879
}
1880

S
Sujith 已提交
1881 1882 1883 1884
/***********/
/* TX, DMA */
/***********/

1885
/*
S
Sujith 已提交
1886 1887
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
1888
 */
S
Sujith 已提交
1889
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1890
			     struct list_head *head, bool internal)
1891
{
1892
	struct ath_hw *ah = sc->sc_ah;
1893
	struct ath_common *common = ath9k_hw_common(ah);
1894 1895 1896
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;
1897

S
Sujith 已提交
1898 1899 1900 1901
	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
1902

S
Sujith 已提交
1903 1904
	if (list_empty(head))
		return;
1905

1906
	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
S
Sujith 已提交
1907
	bf = list_first_entry(head, struct ath_buf, list);
1908
	bf_last = list_entry(head->prev, struct ath_buf, list);
1909

1910 1911
	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);
1912

1913 1914
	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
1915
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1916
		puttxbuf = true;
S
Sujith 已提交
1917
	} else {
1918 1919
		list_splice_tail_init(head, &txq->axq_q);

1920 1921
		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
1922
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
J
Joe Perches 已提交
1923 1924
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
1925 1926 1927 1928 1929 1930 1931 1932 1933
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1934
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
1935 1936 1937 1938
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
F
Felix Fietkau 已提交
1939
		TX_STAT_INC(txq->axq_qnum, txstart);
1940
		ath9k_hw_txstart(ah, txq->axq_qnum);
S
Sujith 已提交
1941
	}
1942 1943

	if (!internal) {
1944 1945 1946 1947 1948 1949 1950
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf = bf->bf_lastbf->bf_next;
		}
1951
	}
S
Sujith 已提交
1952
}
1953

F
Felix Fietkau 已提交
1954
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1955
			       struct ath_atx_tid *tid, struct sk_buff *skb)
S
Sujith 已提交
1956
{
1957 1958
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
S
Sujith 已提交
1959 1960
	struct ath_buf *bf;

1961 1962 1963 1964
	bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
1965
	bf->bf_state.bf_type = 0;
S
Sujith 已提交
1966

1967
	bf->bf_next = NULL;
S
Sujith 已提交
1968
	bf->bf_lastbf = bf;
1969
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1970
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
S
Sujith 已提交
1971
	TX_STAT_INC(txq->axq_qnum, queued);
S
Sujith 已提交
1972 1973
}

1974 1975 1976
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
1977
			     int framelen)
S
Sujith 已提交
1978 1979
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1980
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1981
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1982
	const struct ieee80211_rate *rate;
1983
	struct ath_frame_info *fi = get_frame_info(skb);
1984
	struct ath_node *an = NULL;
1985
	enum ath9k_key_type keytype;
1986 1987 1988 1989 1990 1991 1992 1993 1994 1995
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;
S
Sujith 已提交
1996

1997
	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
1998
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
S
Sujith 已提交
1999

2000 2001 2002
	if (sta)
		an = (struct ath_node *) sta->drv_priv;

2003 2004 2005
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
2006 2007
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
2008 2009 2010 2011
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
2012 2013 2014
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
S
Sujith 已提交
2015 2016
}

2017 2018 2019 2020
/*
 * Reduce the tx chainmask for rates/configurations where using all chains
 * is not allowed (APM regulatory on 5GHz legacy/low rates, or CCK rates
 * on AR9462 while BT coexistence is active).
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2033 2034 2035 2036
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
2037
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
F
Felix Fietkau 已提交
2038
					   struct ath_txq *txq,
2039
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
2040
					   struct sk_buff *skb)
2041
{
F
Felix Fietkau 已提交
2042
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2043
	struct ath_frame_info *fi = get_frame_info(skb);
2044
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
F
Felix Fietkau 已提交
2045
	struct ath_buf *bf;
S
Sujith Manoharan 已提交
2046
	int fragno;
2047
	u16 seqno;
F
Felix Fietkau 已提交
2048 2049 2050

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
2051
		ath_dbg(common, XMIT, "TX buffers are full\n");
F
Felix Fietkau 已提交
2052
		return NULL;
F
Felix Fietkau 已提交
2053
	}
2054

S
Sujith 已提交
2055
	ATH_TXBUF_RESET(bf);
2056

2057
	if (tid) {
S
Sujith Manoharan 已提交
2058
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
2059 2060
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
S
Sujith Manoharan 已提交
2061 2062 2063 2064 2065 2066 2067

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

2068 2069 2070
		bf->bf_state.seqno = seqno;
	}

2071
	bf->bf_mpdu = skb;
2072

B
Ben Greear 已提交
2073 2074 2075
	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
2076
		bf->bf_mpdu = NULL;
2077
		bf->bf_buf_addr = 0;
2078 2079
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
F
Felix Fietkau 已提交
2080
		ath_tx_return_buffer(sc, bf);
F
Felix Fietkau 已提交
2081
		return NULL;
2082 2083
	}

2084
	fi->bf = bf;
F
Felix Fietkau 已提交
2085 2086 2087 2088

	return bf;
}

2089 2090
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
2091
{
2092 2093
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2094
	struct ieee80211_sta *sta = txctl->sta;
2095
	struct ieee80211_vif *vif = info->control.vif;
2096
	struct ath_softc *sc = hw->priv;
F
Felix Fietkau 已提交
2097
	int frmlen = skb->len + FCS_LEN;
2098
	int padpos, padsize;
2099

2100 2101 2102 2103
	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

F
Felix Fietkau 已提交
2104 2105 2106
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

2107
	/*
S
Sujith 已提交
2108 2109 2110
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
2111
	 */
S
Sujith 已提交
2112 2113 2114 2115 2116
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2117 2118
	}

2119 2120 2121 2122 2123
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

2124
	/* Add the padding after the header if this is not already done */
2125
	padpos = ieee80211_hdrlen(hdr->frame_control);
2126 2127 2128 2129
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;
2130

2131 2132
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
2133 2134
	}

2135
	setup_frame_info(hw, sta, skb, frmlen);
2136 2137 2138
	return 0;
}

2139

2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
2160 2161 2162 2163 2164
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

2165
	q = skb_get_queue_mapping(skb);
F
Felix Fietkau 已提交
2166 2167

	ath_txq_lock(sc, txq);
2168
	if (txq == sc->tx.txq_map[q] &&
2169 2170
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
2171
		ieee80211_stop_queue(sc->hw, q);
2172
		txq->stopped = true;
2173 2174
	}

2175 2176 2177 2178
	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
2179 2180
	} else if (txctl->an &&
		   ieee80211_is_data_present(hdr->frame_control)) {
2181
		tid = ath_get_skb_tid(sc, txctl->an, skb);
2182 2183 2184

		WARN_ON(tid->ac->txq != txctl->txq);

2185 2186 2187
		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

2188
		/*
2189 2190
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
2191
		 */
2192 2193 2194 2195 2196 2197
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(txq, tid);

		ath_txq_schedule(sc, txq);
2198 2199 2200
		goto out;
	}

2201
	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2202
	if (!bf) {
2203
		ath_txq_skb_done(sc, txq, skb);
2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

2216
	ath_set_rates(vif, sta, bf);
2217
	ath_tx_send_normal(sc, txq, tid, skb);
F
Felix Fietkau 已提交
2218

2219
out:
F
Felix Fietkau 已提交
2220
	ath_txq_unlock(sc, txq);
F
Felix Fietkau 已提交
2221

2222
	return 0;
2223 2224
}

2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2256
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2293 2294 2295
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2296

S
Sujith 已提交
2297
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2298
			    int tx_flags, struct ath_txq *txq)
S
Sujith 已提交
2299
{
S
Sujith 已提交
2300
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2301
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2302
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2303
	int padpos, padsize;
S
Sujith Manoharan 已提交
2304
	unsigned long flags;
S
Sujith 已提交
2305

2306
	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
S
Sujith 已提交
2307

2308 2309 2310
	if (sc->sc_ah->caldata)
		sc->sc_ah->caldata->paprd_packet_sent = true;

2311
	if (!(tx_flags & ATH_TX_ERROR))
S
Sujith 已提交
2312 2313
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
S
Sujith 已提交
2314

2315
	padpos = ieee80211_hdrlen(hdr->frame_control);
2316 2317 2318 2319 2320 2321 2322 2323
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
S
Sujith 已提交
2324
	}
S
Sujith 已提交
2325

S
Sujith Manoharan 已提交
2326
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
2327
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
S
Sujith 已提交
2328
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2329
		ath_dbg(common, PS,
J
Joe Perches 已提交
2330
			"Going back to sleep after having received TX status (0x%lx)\n",
S
Sujith 已提交
2331 2332 2333 2334
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
2335
	}
S
Sujith Manoharan 已提交
2336
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2337

2338
	__skb_queue_tail(&txq->complete_q, skb);
2339
	ath_txq_skb_done(sc, txq, skb);
S
Sujith 已提交
2340
}
2341

S
Sujith 已提交
2342
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2343
				struct ath_txq *txq, struct list_head *bf_q,
2344
				struct ath_tx_status *ts, int txok)
2345
{
S
Sujith 已提交
2346
	struct sk_buff *skb = bf->bf_mpdu;
2347
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
S
Sujith 已提交
2348
	unsigned long flags;
2349
	int tx_flags = 0;
2350

2351
	if (!txok)
2352
		tx_flags |= ATH_TX_ERROR;
2353

2354 2355 2356
	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

B
Ben Greear 已提交
2357
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2358
	bf->bf_buf_addr = 0;
2359 2360

	if (bf->bf_state.bfs_paprd) {
2361 2362 2363
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2364
			dev_kfree_skb_any(skb);
2365
		else
2366
			complete(&sc->paprd_complete);
2367
	} else {
2368
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2369
		ath_tx_complete(sc, skb, tx_flags, txq);
2370
	}
2371 2372 2373 2374
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;
S
Sujith 已提交
2375 2376 2377 2378 2379 2380 2381

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2382 2383
}

F
Felix Fietkau 已提交
2384 2385
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
2386
			     int txok)
2387
{
S
Sujith 已提交
2388
	struct sk_buff *skb = bf->bf_mpdu;
2389
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
S
Sujith 已提交
2390
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
F
Felix Fietkau 已提交
2391
	struct ieee80211_hw *hw = sc->hw;
2392
	struct ath_hw *ah = sc->sc_ah;
2393
	u8 i, tx_rateindex;
2394

S
Sujith 已提交
2395
	if (txok)
2396
		tx_info->status.ack_signal = ts->ts_rssi;
S
Sujith 已提交
2397

2398
	tx_rateindex = ts->ts_rateindex;
2399 2400
	WARN_ON(tx_rateindex >= hw->max_rates);

2401
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2402
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2403

2404
		BUG_ON(nbad > nframes);
2405
	}
2406 2407
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;
2408

2409
	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2410
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
2423 2424 2425
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
2426
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2427 2428
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
2429
	}
2430

2431
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2432
		tx_info->status.rates[i].count = 0;
2433 2434
		tx_info->status.rates[i].idx = -1;
	}
2435

2436
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2437 2438
}

S
Sujith 已提交
2439
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2440
{
2441
	struct ath_hw *ah = sc->sc_ah;
2442
	struct ath_common *common = ath9k_hw_common(ah);
S
Sujith 已提交
2443
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
2444
	struct list_head bf_head;
S
Sujith 已提交
2445
	struct ath_desc *ds;
2446
	struct ath_tx_status ts;
S
Sujith 已提交
2447
	int status;
2448

2449
	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
J
Joe Perches 已提交
2450 2451
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
2452

F
Felix Fietkau 已提交
2453
	ath_txq_lock(sc, txq);
2454
	for (;;) {
2455
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
2456 2457
			break;

2458 2459
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
2460
			ath_txq_schedule(sc, txq);
2461 2462 2463 2464
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

S
Sujith 已提交
2465 2466 2467 2468 2469 2470 2471 2472 2473
		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
S
Sujith 已提交
2474
		if (bf->bf_stale) {
S
Sujith 已提交
2475
			bf_held = bf;
2476
			if (list_is_last(&bf_held->list, &txq->axq_q))
S
Sujith 已提交
2477
				break;
2478 2479 2480

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
2481 2482 2483
		}

		lastbf = bf->bf_lastbf;
S
Sujith 已提交
2484
		ds = lastbf->bf_desc;
2485

2486 2487
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
2488
		if (status == -EINPROGRESS)
S
Sujith 已提交
2489
			break;
2490

2491
		TX_STAT_INC(txq->axq_qnum, txprocdesc);
2492

S
Sujith 已提交
2493 2494 2495 2496 2497
		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
S
Sujith 已提交
2498
		lastbf->bf_stale = true;
S
Sujith 已提交
2499 2500 2501 2502
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);
2503

2504
		if (bf_held) {
2505 2506
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
S
Sujith 已提交
2507
		}
2508

2509
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2510
	}
F
Felix Fietkau 已提交
2511
	ath_txq_unlock_complete(sc, txq);
2512 2513
}

S
Sujith 已提交
2514
void ath_tx_tasklet(struct ath_softc *sc)
2515
{
2516 2517
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2518
	int i;
2519

S
Sujith 已提交
2520 2521 2522
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2523 2524 2525
	}
}

2526 2527
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
2528
	struct ath_tx_status ts;
2529 2530 2531 2532 2533
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
2534
	struct list_head *fifo_list;
2535 2536 2537
	int status;

	for (;;) {
2538
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
2539 2540
			break;

2541
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
2542 2543 2544
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
2545
			ath_dbg(common, XMIT, "Error processing tx status\n");
2546 2547 2548
			break;
		}

2549 2550 2551 2552
		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2553
			continue;
2554
		}
2555

2556
		txq = &sc->tx.txq[ts.qid];
2557

F
Felix Fietkau 已提交
2558
		ath_txq_lock(sc, txq);
2559

2560 2561
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

2562 2563
		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
F
Felix Fietkau 已提交
2564
			ath_txq_unlock(sc, txq);
2565 2566 2567
			return;
		}

2568 2569 2570 2571 2572 2573 2574
		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

2575 2576 2577
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
2578 2579
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
2580
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2581

2582 2583
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;
2584

2585 2586 2587 2588 2589
				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
2590 2591 2592 2593 2594
		} else {
			lastbf->bf_stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
2595
		}
2596

2597
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
F
Felix Fietkau 已提交
2598
		ath_txq_unlock_complete(sc, txq);
2599 2600 2601
	}
}

S
Sujith 已提交
2602 2603 2604
/*****************/
/* Init, Cleanup */
/*****************/
2605

2606 2607 2608 2609 2610 2611
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2612 2613
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/* Allocate the TX status ring and register it with the hardware. */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (!ret)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return ret;
}

S
Sujith 已提交
2633
int ath_tx_init(struct ath_softc *sc, int nbufs)
2634
{
2635
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2636
	int error = 0;
2637

2638
	spin_lock_init(&sc->tx.txbuflock);
2639

2640
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2641
				  "tx", nbufs, 1, 1);
2642
	if (error != 0) {
2643 2644
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2645
		return error;
2646
	}
2647

2648
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2649
				  "beacon", ATH_BCBUF, 1, 1);
2650
	if (error != 0) {
2651 2652
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2653
		return error;
2654
	}
2655

2656 2657
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2658
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2659
		error = ath_tx_edma_init(sc);
2660

S
Sujith 已提交
2661
	return error;
2662 2663 2664 2665
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2666 2667 2668
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2669

2670
	for (tidno = 0, tid = &an->tid[tidno];
2671
	     tidno < IEEE80211_NUM_TIDS;
2672 2673 2674 2675 2676 2677 2678
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2679
		tid->paused    = false;
2680
		tid->active	   = false;
2681
		__skb_queue_head_init(&tid->buf_q);
2682
		__skb_queue_head_init(&tid->retry_q);
2683
		acno = TID_TO_WME_AC(tidno);
2684
		tid->ac = &an->ac[acno];
2685
	}
2686

2687
	for (acno = 0, ac = &an->ac[acno];
2688
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2689
		ac->sched    = false;
2690
		ac->clear_ps_filter = true;
2691
		ac->txq = sc->tx.txq_map[acno];
2692
		INIT_LIST_HEAD(&ac->tid_q);
2693 2694 2695
	}
}

S
Sujith 已提交
2696
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2697
{
2698 2699
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
2700
	struct ath_txq *txq;
2701
	int tidno;
S
Sujith 已提交
2702

2703
	for (tidno = 0, tid = &an->tid[tidno];
2704
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2705

2706
		ac = tid->ac;
2707
		txq = ac->txq;
2708

F
Felix Fietkau 已提交
2709
		ath_txq_lock(sc, txq);
2710 2711 2712 2713 2714 2715 2716 2717 2718

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2719
		}
2720 2721

		ath_tid_drain(sc, txq, tid);
2722
		tid->active = false;
2723

F
Felix Fietkau 已提交
2724
		ath_txq_unlock(sc, txq);
2725 2726
	}
}