xmit.c 69.7 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

/*
 * 802.11n PHY timing constants and helpers used to size aggregates.
 * Symbol time is 4 us with a normal guard interval, 3.6 us with a
 * short guard interval (hence the *18/5 and *5/18 scalings).
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */

#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
/* Arguments parenthesized so expression arguments expand correctly. */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)


38
/*
 * Data bits carried per OFDM symbol for MCS 0-7 (one spatial stream),
 * indexed as [mcs][bw] with bw 0 = 20 MHz, 1 = 40 MHz.  Multi-stream
 * rates are derived by multiplying with the stream count.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55
				struct ath_txq *txq, struct list_head *bf_q,
56
				struct ath_tx_status *ts, int txok);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
67
					   struct sk_buff *skb);
68

69
/*
 * Index into the per-queue max_aggr_framelen tables:
 * HT20/HT40 channel width crossed with normal/short guard interval.
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
81
	__acquires(&txq->axq_lock)
F
Felix Fietkau 已提交
82 83 84 85
{
	spin_lock_bh(&txq->axq_lock);
}

86
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
87
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
88 89 90 91
{
	spin_unlock_bh(&txq->axq_lock);
}

92
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
93
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
94 95 96 97 98 99 100 101 102 103 104 105
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
106
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
107
{
S
Sujith 已提交
108
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
109

S
Sujith 已提交
110 111
	if (tid->paused)
		return;
S
Sujith 已提交
112

S
Sujith 已提交
113 114
	if (tid->sched)
		return;
S
Sujith 已提交
115

S
Sujith 已提交
116 117
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
118

S
Sujith 已提交
119 120
	if (ac->sched)
		return;
121

S
Sujith 已提交
122 123 124
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
125

126
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
127 128
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
129 130 131
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
132 133
}

134 135
/* Send a BlockAckReq for this TID, moving the receiver's window to @seqno. */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	/* no station attached (e.g. internal node) - nothing to send to */
	if (!tid->an->sta)
		return;

	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

143 144 145 146 147 148 149
/* Fill bf->rates with the mac80211 rate table selected for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
/*
 * Account for a completed frame on its mac80211 queue and wake the
 * queue if the pending-frame count dropped back below the limit.
 * UAPSD completions are charged to the frame's original data queue.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int hwq = skb_get_queue_mapping(skb);

	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[hwq];

	/* only the queue mapped to this mac80211 queue is accounted */
	if (txq != sc->tx.txq_map[hwq])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[hwq]) {
		ieee80211_wake_queue(sc->hw, hwq);
		txq->stopped = false;
	}
}

172 173 174
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
175
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
176 177 178
	return ATH_AN_2_TID(an, tidno);
}

179 180
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
181
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
182 183 184 185
}

/* Pop the next frame for this TID; retransmissions take priority. */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *skb = __skb_dequeue(&tid->retry_q);

	if (!skb)
		skb = __skb_dequeue(&tid->buf_q);

	return skb;
}

195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* frames with a buffer already attached keep it */
		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* buffer setup failed - drop the frame */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}
}

230
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
S
Sujith 已提交
231
{
232
	struct ath_txq *txq = tid->ac->txq;
233
	struct sk_buff *skb;
S
Sujith 已提交
234 235
	struct ath_buf *bf;
	struct list_head bf_head;
236
	struct ath_tx_status ts;
237
	struct ath_frame_info *fi;
238
	bool sendbar = false;
239

240
	INIT_LIST_HEAD(&bf_head);
241

242
	memset(&ts, 0, sizeof(ts));
243

244
	while ((skb = __skb_dequeue(&tid->retry_q))) {
245 246
		fi = get_frame_info(skb);
		bf = fi->bf;
F
Felix Fietkau 已提交
247
		if (!bf) {
248 249 250
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
F
Felix Fietkau 已提交
251 252
		}

253
		if (fi->baw_tracked) {
254
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
255
			sendbar = true;
256
		}
257 258 259

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
260
	}
261

262
	if (sendbar) {
F
Felix Fietkau 已提交
263
		ath_txq_unlock(sc, txq);
264
		ath_send_bar(tid, tid->seq_start);
F
Felix Fietkau 已提交
265 266
		ath_txq_lock(sc, txq);
	}
S
Sujith 已提交
267
}
268

S
Sujith 已提交
269 270
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
271
{
S
Sujith 已提交
272
	int index, cindex;
273

S
Sujith 已提交
274 275
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
276

277
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
278

279
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
280 281
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
282 283
		if (tid->bar_index >= 0)
			tid->bar_index--;
S
Sujith 已提交
284
	}
S
Sujith 已提交
285
}
286

S
Sujith 已提交
287
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
288
			     struct ath_buf *bf)
S
Sujith 已提交
289
{
290 291
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
S
Sujith 已提交
292
	int index, cindex;
S
Sujith 已提交
293

294
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
295
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
296
	__set_bit(cindex, tid->tx_buf);
297
	fi->baw_tracked = 1;
298

S
Sujith 已提交
299 300 301 302
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
303 304 305
	}
}

S
Sujith 已提交
306 307
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
308 309

{
310
	struct sk_buff *skb;
S
Sujith 已提交
311 312
	struct ath_buf *bf;
	struct list_head bf_head;
313
	struct ath_tx_status ts;
314
	struct ath_frame_info *fi;
315 316

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
317
	INIT_LIST_HEAD(&bf_head);
318

319
	while ((skb = ath_tid_dequeue(tid))) {
320 321
		fi = get_frame_info(skb);
		bf = fi->bf;
322

323 324 325 326 327
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

328
		list_add_tail(&bf->list, &bf_head);
329
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
330
	}
331 332
}

S
Sujith 已提交
333
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
334
			     struct sk_buff *skb, int count)
335
{
336
	struct ath_frame_info *fi = get_frame_info(skb);
337
	struct ath_buf *bf = fi->bf;
S
Sujith 已提交
338
	struct ieee80211_hdr *hdr;
339
	int prev = fi->retries;
340

S
Sujith 已提交
341
	TX_STAT_INC(txq->axq_qnum, a_retries);
342 343 344
	fi->retries += count;

	if (prev > 0)
345
		return;
346

S
Sujith 已提交
347 348
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
349 350
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
351 352
}

353
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
354
{
355
	struct ath_buf *bf = NULL;
S
Sujith 已提交
356 357

	spin_lock_bh(&sc->tx.txbuflock);
358 359

	if (unlikely(list_empty(&sc->tx.txbuf))) {
360 361 362
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
363 364 365 366

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
367 368
	spin_unlock_bh(&sc->tx.txbuflock);

369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386
	return bf;
}

/* Give a descriptor buffer back to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

S
Sujith 已提交
387 388 389 390
	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
391
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
S
Sujith 已提交
392
	tbf->bf_state = bf->bf_state;
393
	tbf->bf_state.stale = false;
S
Sujith 已提交
394 395 396 397

	return tbf;
}

398 399 400 401
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
402
	struct ath_frame_info *fi;
403 404 405 406 407 408 409 410 411 412 413 414 415 416 417
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
418
		fi = get_frame_info(bf->bf_mpdu);
419
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
420 421 422 423 424 425 426 427 428 429

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
430 431
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
432
				 struct ath_tx_status *ts, int txok)
433
{
S
Sujith 已提交
434 435
	struct ath_node *an = NULL;
	struct sk_buff *skb;
436
	struct ieee80211_sta *sta;
F
Felix Fietkau 已提交
437
	struct ieee80211_hw *hw = sc->hw;
438
	struct ieee80211_hdr *hdr;
439
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
440
	struct ath_atx_tid *tid = NULL;
S
Sujith 已提交
441
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
442 443
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
444
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
445
	u32 ba[WME_BA_BMP_SIZE >> 5];
446
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
447
	bool rc_update = true, isba;
448
	struct ieee80211_tx_rate rates[4];
449
	struct ath_frame_info *fi;
450
	int nframes;
451
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
452
	int i, retries;
453
	int bar_index = -1;
454

S
Sujith 已提交
455
	skb = bf->bf_mpdu;
456 457
	hdr = (struct ieee80211_hdr *)skb->data;

458 459
	tx_info = IEEE80211_SKB_CB(skb);

460
	memcpy(rates, bf->rates, sizeof(rates));
461

462 463 464 465
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

466
	rcu_read_lock();
467

468
	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
469 470
	if (!sta) {
		rcu_read_unlock();
471

472 473 474 475
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

476
			if (!bf->bf_state.stale || bf_next != NULL)
477 478
				list_move_tail(&bf->list, &bf_head);

479
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
480 481 482

			bf = bf_next;
		}
483
		return;
484 485
	}

486
	an = (struct ath_node *)sta->drv_priv;
487
	tid = ath_get_skb_tid(sc, an, skb);
488
	seq_first = tid->seq_start;
489
	isba = ts->ts_flags & ATH9K_TX_BA;
490

491 492 493 494
	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
495 496 497
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
498
	 */
499
	if (isba && tid->tidno != ts->tid)
500 501
		txok = false;

S
Sujith 已提交
502
	isaggr = bf_isaggr(bf);
S
Sujith 已提交
503
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
504

S
Sujith 已提交
505
	if (isaggr && txok) {
506 507 508
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
S
Sujith 已提交
509
		} else {
S
Sujith 已提交
510 511 512 513 514 515 516
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
517
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
S
Sujith 已提交
518
				needreset = 1;
S
Sujith 已提交
519
		}
520 521
	}

522
	__skb_queue_head_init(&bf_pending);
523

524
	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
S
Sujith 已提交
525
	while (bf) {
526 527
		u16 seqno = bf->bf_state.seqno;

528
		txfail = txpending = sendbar = 0;
S
Sujith 已提交
529
		bf_next = bf->bf_next;
530

531 532
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
533
		fi = get_frame_info(skb);
534

535 536
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
537 538 539 540 541 542
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
S
Sujith 已提交
543 544
			/* transmit completion, subframe is
			 * acked by block ack */
545
			acked_cnt++;
S
Sujith 已提交
546 547
		} else if (!isaggr && txok) {
			/* transmit completion */
548
			acked_cnt++;
549 550 551 552 553 554 555 556
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
S
Sujith 已提交
557
		} else {
558 559 560 561
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
S
Sujith 已提交
562
		}
563

564 565 566 567
		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
568
		INIT_LIST_HEAD(&bf_head);
569
		if (bf_next != NULL || !bf_last->bf_state.stale)
S
Sujith 已提交
570
			list_move_tail(&bf->list, &bf_head);
571

572
		if (!txpending) {
S
Sujith 已提交
573 574 575 576
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
577
			ath_tx_update_baw(sc, tid, seqno);
578

579
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
580
				memcpy(tx_info->control.rates, rates, sizeof(rates));
581
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
582 583 584
				rc_update = false;
			}

585
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
586
				!txfail);
S
Sujith 已提交
587
		} else {
588 589 590 591
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
S
Sujith 已提交
592
			/* retry the un-acked ones */
593
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
610
				}
611 612

				fi->bf = tbf;
S
Sujith 已提交
613 614 615 616 617 618
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
619
			__skb_queue_tail(&bf_pending, skb);
S
Sujith 已提交
620 621 622
		}

		bf = bf_next;
623 624
	}

625
	/* prepend un-acked frames to the beginning of the pending frame queue */
626
	if (!skb_queue_empty(&bf_pending)) {
627
		if (an->sleeping)
628
			ieee80211_sta_set_buffered(sta, tid->tidno, true);
629

630
		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
631
		if (!an->sleeping) {
632
			ath_tx_queue_tid(txq, tid);
633

S
Sujith Manoharan 已提交
634
			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
635 636
				tid->ac->clear_ps_filter = true;
		}
637 638
	}

F
Felix Fietkau 已提交
639 640 641 642 643 644 645 646 647 648 649
	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

650 651
	rcu_read_unlock();

652 653
	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
S
Sujith 已提交
654
}
655

656 657 658 659 660 661 662 663 664 665
/* True for an A-MPDU frame that is not a rate-control probe. */
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);

	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
666
	struct ieee80211_tx_info *info;
667 668 669 670 671 672 673 674 675 676 677
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
678 679 680 681
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
682
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
683
		}
684 685 686 687
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

688
	if (!flush)
689 690 691
		ath_txq_schedule(sc, txq);
}

692 693 694 695 696 697 698 699 700 701 702
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

703 704 705 706
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

707 708 709 710 711 712 713
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
714 715
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
716
{
S
Sujith 已提交
717 718
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
719
	struct ieee80211_tx_rate *rates;
S
Sujith 已提交
720
	u32 max_4ms_framelen, frmlen;
721
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
722
	int q = tid->ac->txq->mac80211_qnum;
S
Sujith 已提交
723
	int i;
S
Sujith 已提交
724

S
Sujith 已提交
725
	skb = bf->bf_mpdu;
S
Sujith 已提交
726
	tx_info = IEEE80211_SKB_CB(skb);
727
	rates = bf->rates;
S
Sujith 已提交
728

S
Sujith 已提交
729 730
	/*
	 * Find the lowest frame length among the rate series that will have a
731
	 * 4ms (or TXOP limited) transmit duration.
S
Sujith 已提交
732 733
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
S
Sujith 已提交
734

S
Sujith 已提交
735
	for (i = 0; i < 4; i++) {
736
		int modeidx;
S
Sujith 已提交
737

738 739
		if (!rates[i].count)
			continue;
740

741 742 743
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
744
		}
745 746 747 748 749 750 751 752 753

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

754
		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
755
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
756
	}
S
Sujith 已提交
757

758
	/*
S
Sujith 已提交
759 760 761
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
762
	 */
S
Sujith 已提交
763 764
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;
765

766 767 768 769 770 771 772 773
	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;
774

775 776
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);
777

S
Sujith 已提交
778 779
	return aggr_limit;
}
780

S
Sujith 已提交
781
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

855 856
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
857
			struct ath_atx_tid *tid, struct sk_buff_head **q)
858
{
859
	struct ieee80211_tx_info *tx_info;
860
	struct ath_frame_info *fi;
861
	struct sk_buff *skb;
862
	struct ath_buf *bf;
863
	u16 seqno;
864

865
	while (1) {
866 867 868 869
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

870
		skb = skb_peek(*q);
871 872 873
		if (!skb)
			break;

874 875
		fi = get_frame_info(skb);
		bf = fi->bf;
876
		if (!fi->bf)
F
Felix Fietkau 已提交
877
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
878 879
		else
			bf->bf_state.stale = false;
880

F
Felix Fietkau 已提交
881
		if (!bf) {
882
			__skb_unlink(skb, *q);
883
			ath_txq_skb_done(sc, txq, skb);
F
Felix Fietkau 已提交
884
			ieee80211_free_txskb(sc->hw, skb);
885
			continue;
F
Felix Fietkau 已提交
886
		}
887

888 889 890 891 892 893 894 895 896 897
		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

898
		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
899
		seqno = bf->bf_state.seqno;
900

S
Sujith 已提交
901
		/* do not step over block-ack window */
902
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
S
Sujith 已提交
903
			break;
904

905 906 907 908 909 910
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
911
			__skb_unlink(skb, *q);
912 913 914 915 916
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

917 918 919 920 921 922
		return bf;
	}

	return NULL;
}

923 924 925 926 927
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
928 929
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
930
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
F
Felix Fietkau 已提交
931
	int nframes = 0, ndelim;
932
	u16 aggr_limit = 0, al = 0, bpad = 0,
F
Felix Fietkau 已提交
933
	    al_delta, h_baw = tid->baw_size / 2;
934 935 936
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
937
	bool closed = false;
938

939 940
	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);
941

942
	do {
943 944 945
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

S
Sujith 已提交
946
		/* do not exceed aggregation limit */
947
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
F
Felix Fietkau 已提交
948 949
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
950
			    ath_lookup_legacy(bf) || nframes >= h_baw)
F
Felix Fietkau 已提交
951
				break;
952

F
Felix Fietkau 已提交
953
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
954 955
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
F
Felix Fietkau 已提交
956
				break;
S
Sujith 已提交
957
		}
958

S
Sujith 已提交
959
		/* add padding for previous frame to aggregation length */
S
Sujith 已提交
960
		al += bpad + al_delta;
961

S
Sujith 已提交
962 963 964 965
		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
966 967
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
S
Sujith 已提交
968
		bpad = PADBYTES(al_delta) + (ndelim << 2);
969

970
		nframes++;
S
Sujith 已提交
971
		bf->bf_next = NULL;
972

S
Sujith 已提交
973
		/* link buffers of this frame to the aggregate */
974 975
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
976
		bf->bf_state.ndelim = ndelim;
977

978
		__skb_unlink(skb, tid_q);
979
		list_add_tail(&bf->list, bf_q);
980
		if (bf_prev)
S
Sujith 已提交
981
			bf_prev->bf_next = bf;
982

S
Sujith 已提交
983
		bf_prev = bf;
S
Sujith 已提交
984

985 986 987 988 989
		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
990
	} while (ath_tid_has_buffered(tid));
991

992 993 994 995 996 997 998 999 1000 1001
	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

1002
	*aggr_len = al;
S
Sujith 已提交
1003

1004
	return closed;
S
Sujith 已提交
1005 1006
#undef PADBYTES
}
1007

1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1037 1038 1039 1040 1041 1042
/*
 * Invert ath_pkt_duration(): given a TXOP budget in microseconds, return
 * the largest frame length (bytes) that fits for the given MCS/bandwidth/GI.
 * Result is clamped to 65532 (max A-MPDU length field value).
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nss = HT_RC_2_STREAMS(mcs);
	int sym_budget;
	int bit_budget;
	int max_bytes;

	/* Remove the fixed preamble/training overhead from the budget. */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);

	/* Convert remaining airtime to whole symbols, then to data bits. */
	sym_budget = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	bit_budget = sym_budget * bits_per_symbol[mcs % 8][ht40] * nss;
	bit_budget -= OFDM_PLCP_BITS;

	max_bytes = bit_budget / 8;
	if (max_bytes > 65532)
		max_bytes = 65532;

	return max_bytes;
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * 32 MCS indices and all four bandwidth/GI combinations, based on the
 * queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	for (mcs = 0; mcs < 32; mcs++) {
		sc->tx.max_aggr_framelen[queue][MCS_HT20][mcs] =
			ath_max_framelen(txop, mcs, false, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI][mcs] =
			ath_max_framelen(txop, mcs, false, true);
		sc->tx.max_aggr_framelen[queue][MCS_HT40][mcs] =
			ath_max_framelen(txop, mcs, true, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI][mcs] =
			ath_max_framelen(txop, mcs, true, true);
	}
}

1075
/*
 * Fill in the per-rate-series fields of @info (rates, flags, chainmask,
 * packet duration) for the buffer @bf, based on the mac80211 rate table
 * stored in bf->rates.
 *
 * @len: frame length used for duration computation (whole A-MPDU length
 *       for aggregates, per ath_tx_fill_desc()).
 * @rts: force RTS protection on every series when true.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* Skip unused series (mac80211 marks them with idx < 0). */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 * (rts_thresh == (u32)-1 means "RTS disabled".)
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		/* RTS takes precedence over CTS-to-self protection. */
		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: hardware encodes MCS with the 0x80 bit set. */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC only valid for single-stream rates (MCS 0-7). */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			/* Rate has no short-preamble variant. */
			is_sp = false;
		}

		/* PAPRD calibration frames must use the full chainmask. */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1182

1183 1184 1185 1186 1187 1188 1189 1190
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1191

1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1204 1205
}

1206 1207
/*
 * Walk the bf_next chain starting at @bf and program a hardware tx
 * descriptor for every subframe.  Rate series and per-frame flags are
 * computed only once per (sub)aggregate, on its first buffer.
 *
 * @len: total A-MPDU length for aggregates; ignored (replaced by the
 *       frame's own length) for non-aggregated buffers.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* In tx99 mode the last descriptor links back to itself. */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			/* First subframe: compute flags and rate series once. */
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* Mark the subframe's position within the aggregate. */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* End of this aggregate: restart flag setup on the next one. */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333
/*
 * Form a short burst (at most two frames) of non-aggregated frames for
 * the given TID, moving the buffers from the TID queue onto @bf_q and
 * linking them via bf_next.  Stops early when the TID runs dry or the
 * next frame wants A-MPDU treatment.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *cur = bf_first;
	struct ath_buf *prev = NULL;
	int count = 0;

	for (;;) {
		struct ieee80211_tx_info *next_info;
		struct sk_buff *frame = cur->bf_mpdu;

		/* Detach from the TID queue and chain onto the burst. */
		count++;
		__skb_unlink(frame, tid_q);
		list_add_tail(&cur->list, bf_q);
		if (prev)
			prev->bf_next = cur;
		prev = cur;

		/* A burst never carries more than two frames. */
		if (count >= 2)
			break;

		cur = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!cur)
			break;

		/* A-MPDU-eligible frames are not burst; leave them queued. */
		next_info = IEEE80211_SKB_CB(cur->bf_mpdu);
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, cur);
	}
}

1334 1335
/*
 * Try to schedule one aggregate (or short burst) of frames for @tid onto
 * @txq.  Returns true when frames were handed to the hardware.  Sets
 * *stop when the queue depth limit is reached and scheduling should
 * cease for this pass.
 *
 * Fix: the local 'last' was assigned the return value of
 * ath_tx_form_aggr() but never read (set-but-unused variable); the
 * call's side effects are all that matter here, so the variable is
 * removed.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	/* Stop scheduling once the hardware queue is deep enough. */
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
				 tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* Ask hardware to clear the PS filter on the first frame if needed. */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1381 1382
/*
 * mac80211 callback: start a BlockAck (aggregation) session for @tid.
 * Initializes the TID's BAW state under the txq lock and reports the
 * starting sequence number via @ssn.  Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	/* Session stays paused until ath_tx_aggr_resume() is called. */
	txtid->active = true;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* Reset the block-ack window tracking. */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1421

1422
/*
 * mac80211 callback: tear down the aggregation session for @tid.
 * Flushes frames still tracked in the BAW and reverts pending frames
 * to non-aggregated state, all under the txq lock.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	txtid->paused = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1435

1436 1437
/*
 * Station entered power-save: unschedule every TID of @an from its txq
 * and tell mac80211 which TIDs still hold buffered frames.
 *
 * Note: ieee80211_sta_set_buffered() is deliberately called after
 * dropping the txq lock.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* TID not on the scheduler queue - nothing to unschedule. */
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

		buffered = ath_tid_has_buffered(tid);

		/* Remove the TID (and its AC, if now idle) from scheduling. */
		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station left power-save: re-queue every TID of @an that still has
 * buffered frames and kick the txq scheduler.  Also requests a PS
 * filter clear on the next transmitted frame (ac->clear_ps_filter).
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (!tid->paused && ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1499 1500
/*
 * Un-pause the aggregation session for @tidno after ADDBA negotiation:
 * set the negotiated BAW size, clear the paused flag, and reschedule
 * the TID if frames are waiting.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	/* Window size scales with the peer's advertised A-MPDU factor. */
	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	tid->paused = false;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
/*
 * mac80211 U-APSD callback: release up to @nframes buffered frames for
 * the TIDs in the @tids bitmask onto the UAPSD queue.  The last released
 * frame is tagged with IEEE80211_TX_STATUS_EOSP to close the service
 * period.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);
		if (tid->paused)
			continue;

		/* Frames are pulled under the TID's own txq lock. */
		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			/* Track in the BAW but send as a single MPDU. */
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* End-of-service-period marker goes on the last released frame. */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1590 1591 1592
/********************/
/* Queue Management */
/********************/
1593

S
Sujith 已提交
1594
/*
 * Allocate and initialize a hardware transmit queue of the given type
 * and AC subtype.  Returns the software txq wrapper, or NULL if the
 * hardware has no queue available.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* mac80211 AC index -> hardware queue subtype. */
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* First use of this hardware queue: initialize the software state. */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1667 1668 1669
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst/ready time) for
 * hardware queue @qnum.  Returns 0 on success or -EIO if the hardware
 * rejects the new properties.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* Read-modify-write: only the EDCA-related fields are replaced. */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		/* Apply the new settings by resetting the queue. */
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Recompute the CAB (content-after-beacon) queue ready time as a
 * percentage (ATH_CABQ_READY_TIME) of the beacon interval and apply it.
 * Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1709
/*
 * Flush every buffer chain on @list, completing each frame with the
 * ATH9K_TX_FLUSH status.  Stale buffers (already reaped by the
 * completion path) are simply returned to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* Cut the whole (sub)frame chain out and complete it. */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1735

1736 1737 1738 1739 1740 1741
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA chips: flush each FIFO slot from tail to head first. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1764
/*
 * Abort TX DMA and drain every configured transmit queue.  Returns true
 * when all queues stopped cleanly, false if some queues still had
 * pending DMA after the abort (a bitmask of them is logged).
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* Device already gone/invalid: report success, nothing to drain. */
	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1808

S
Sujith 已提交
1809
/*
 * Release a hardware tx queue and clear its bit in the software
 * setup mask.
 */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
1814

1815 1816 1817
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 *
 * ACs and TIDs are serviced round-robin: each is removed from the front
 * of its list and re-appended if it still has work.  The outer loop
 * exits when a depth limit is hit (stop) or a full pass over the ACs
 * transmitted nothing.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	bool sent = false;

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(&txq->axq_acq))
		return;

	rcu_read_lock();

	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
	while (!list_empty(&txq->axq_acq)) {
		bool stop = false;

		/* Take the next AC off the round-robin list. */
		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* Re-queue the AC if it still has TIDs waiting. */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (stop)
			break;

		/* Completed a full pass over all ACs. */
		if (ac == last_ac) {
			if (!sent)
				break;

			/* Something was sent; allow another pass. */
			sent = false;
			last_ac = list_entry(txq->axq_acq.prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
}
1884

S
Sujith 已提交
1885 1886 1887 1888
/***********/
/* TX, DMA */
/***********/

1889
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * Handles both EDMA (FIFO-based) and legacy (linked-descriptor) DMA.
 * @internal: when true, queue depth accounting is skipped (the buffers
 * were already counted by a previous ath_tx_txqaddbuf() call).
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA with a free FIFO slot: push directly into the FIFO. */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* Legacy DMA: link the new chain to the previous tail. */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		/* Account queue depth per frame (one chain per frame). */
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1959

F
Felix Fietkau 已提交
1960
/*
 * Send a single (non-aggregated) frame: build a one-buffer list, fill
 * its descriptor and hand it to the hardware queue.  If the frame
 * belongs to an A-MPDU-enabled TID it is still tracked in the BAW.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}

	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

1983 1984 1985
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
1986
			     int framelen)
S
Sujith 已提交
1987 1988
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1989
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1990
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1991
	const struct ieee80211_rate *rate;
1992
	struct ath_frame_info *fi = get_frame_info(skb);
1993
	struct ath_node *an = NULL;
1994
	enum ath9k_key_type keytype;
1995 1996 1997 1998 1999 2000 2001 2002 2003 2004
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;
S
Sujith 已提交
2005

2006
	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
2007
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
S
Sujith 已提交
2008

2009 2010 2011
	if (sta)
		an = (struct ath_node *) sta->drv_priv;

2012 2013 2014
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
2015 2016
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
2017 2018 2019 2020
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
2021 2022 2023

	if (!rate)
		return;
2024 2025 2026
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
S
Sujith 已提交
2027 2028
}

2029 2030 2031 2032
/*
 * Reduce the TX chainmask for special hardware constraints:
 * - APM (power management) on 5 GHz with all three chains and a
 *   sub-MCS8 rate: drop to two chains (0x3).
 * - AR9462 with BT coexistence active on CCK rates: use chain 1 only (0x2).
 * Otherwise the requested chainmask is returned unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
		 IS_CCK_RATE(rate))
		return 0x2;
	else
		return chainmask;
}

2044 2045 2046 2047
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
2048
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
F
Felix Fietkau 已提交
2049
					   struct ath_txq *txq,
2050
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
2051
					   struct sk_buff *skb)
2052
{
F
Felix Fietkau 已提交
2053
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2054
	struct ath_frame_info *fi = get_frame_info(skb);
2055
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
F
Felix Fietkau 已提交
2056
	struct ath_buf *bf;
S
Sujith Manoharan 已提交
2057
	int fragno;
2058
	u16 seqno;
F
Felix Fietkau 已提交
2059 2060 2061

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
2062
		ath_dbg(common, XMIT, "TX buffers are full\n");
F
Felix Fietkau 已提交
2063
		return NULL;
F
Felix Fietkau 已提交
2064
	}
2065

S
Sujith 已提交
2066
	ATH_TXBUF_RESET(bf);
2067

2068
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
S
Sujith Manoharan 已提交
2069
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
2070 2071
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
S
Sujith Manoharan 已提交
2072 2073 2074 2075 2076 2077 2078

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

2079 2080 2081
		bf->bf_state.seqno = seqno;
	}

2082
	bf->bf_mpdu = skb;
2083

B
Ben Greear 已提交
2084 2085 2086
	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
2087
		bf->bf_mpdu = NULL;
2088
		bf->bf_buf_addr = 0;
2089 2090
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
F
Felix Fietkau 已提交
2091
		ath_tx_return_buffer(sc, bf);
F
Felix Fietkau 已提交
2092
		return NULL;
2093 2094
	}

2095
	fi->bf = bf;
F
Felix Fietkau 已提交
2096 2097 2098 2099

	return bf;
}

2100 2101
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
2102
{
2103 2104
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2105
	struct ieee80211_sta *sta = txctl->sta;
2106
	struct ieee80211_vif *vif = info->control.vif;
2107
	struct ath_vif *avp;
2108
	struct ath_softc *sc = hw->priv;
F
Felix Fietkau 已提交
2109
	int frmlen = skb->len + FCS_LEN;
2110
	int padpos, padsize;
2111

2112 2113 2114
	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
2115 2116 2117 2118
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}
2119

F
Felix Fietkau 已提交
2120 2121 2122
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

2123
	/*
S
Sujith 已提交
2124 2125 2126
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
2127
	 */
S
Sujith 已提交
2128 2129 2130 2131 2132
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2133 2134
	}

2135 2136 2137 2138 2139
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

2140
	/* Add the padding after the header if this is not already done */
2141
	padpos = ieee80211_hdrlen(hdr->frame_control);
2142 2143 2144 2145
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;
2146

2147 2148
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
2149 2150
	}

2151
	setup_frame_info(hw, sta, skb, frmlen);
2152 2153 2154
	return 0;
}

2155

2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
2176 2177 2178 2179 2180
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

2181
	q = skb_get_queue_mapping(skb);
F
Felix Fietkau 已提交
2182 2183

	ath_txq_lock(sc, txq);
2184
	if (txq == sc->tx.txq_map[q] &&
2185 2186
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
2187
		ieee80211_stop_queue(sc->hw, q);
2188
		txq->stopped = true;
2189 2190
	}

2191
	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
2192 2193
		tid = ath_get_skb_tid(sc, txctl->an, skb);

2194 2195 2196 2197
	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
2198 2199
	} else if (txctl->an &&
		   ieee80211_is_data_present(hdr->frame_control)) {
2200 2201
		WARN_ON(tid->ac->txq != txctl->txq);

2202 2203 2204
		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

2205
		/*
2206 2207
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
2208
		 */
2209 2210 2211 2212 2213 2214
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(txq, tid);

		ath_txq_schedule(sc, txq);
2215 2216 2217
		goto out;
	}

2218
	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2219
	if (!bf) {
2220
		ath_txq_skb_done(sc, txq, skb);
2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

2233
	ath_set_rates(vif, sta, bf);
2234
	ath_tx_send_normal(sc, txq, tid, skb);
F
Felix Fietkau 已提交
2235

2236
out:
F
Felix Fietkau 已提交
2237
	ath_txq_unlock(sc, txq);
F
Felix Fietkau 已提交
2238

2239
	return 0;
2240 2241
}

2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2273
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2310 2311 2312
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2313

S
Sujith 已提交
2314
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2315
			    int tx_flags, struct ath_txq *txq)
S
Sujith 已提交
2316
{
S
Sujith 已提交
2317
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2318
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2319
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2320
	int padpos, padsize;
S
Sujith Manoharan 已提交
2321
	unsigned long flags;
S
Sujith 已提交
2322

2323
	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
S
Sujith 已提交
2324

2325
	if (sc->sc_ah->caldata)
2326
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2327

2328
	if (!(tx_flags & ATH_TX_ERROR))
S
Sujith 已提交
2329 2330
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
S
Sujith 已提交
2331

2332
	padpos = ieee80211_hdrlen(hdr->frame_control);
2333 2334 2335 2336 2337 2338 2339 2340
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
S
Sujith 已提交
2341
	}
S
Sujith 已提交
2342

S
Sujith Manoharan 已提交
2343
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
2344
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
S
Sujith 已提交
2345
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2346
		ath_dbg(common, PS,
J
Joe Perches 已提交
2347
			"Going back to sleep after having received TX status (0x%lx)\n",
S
Sujith 已提交
2348 2349 2350 2351
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
2352
	}
S
Sujith Manoharan 已提交
2353
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2354

2355
	__skb_queue_tail(&txq->complete_q, skb);
2356
	ath_txq_skb_done(sc, txq, skb);
S
Sujith 已提交
2357
}
2358

S
Sujith 已提交
2359
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2360
				struct ath_txq *txq, struct list_head *bf_q,
2361
				struct ath_tx_status *ts, int txok)
2362
{
S
Sujith 已提交
2363
	struct sk_buff *skb = bf->bf_mpdu;
2364
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
S
Sujith 已提交
2365
	unsigned long flags;
2366
	int tx_flags = 0;
2367

2368
	if (!txok)
2369
		tx_flags |= ATH_TX_ERROR;
2370

2371 2372 2373
	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

B
Ben Greear 已提交
2374
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2375
	bf->bf_buf_addr = 0;
L
Luis R. Rodriguez 已提交
2376 2377
	if (sc->tx99_state)
		goto skip_tx_complete;
2378 2379

	if (bf->bf_state.bfs_paprd) {
2380 2381 2382
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2383
			dev_kfree_skb_any(skb);
2384
		else
2385
			complete(&sc->paprd_complete);
2386
	} else {
2387
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2388
		ath_tx_complete(sc, skb, tx_flags, txq);
2389
	}
L
Luis R. Rodriguez 已提交
2390
skip_tx_complete:
2391 2392 2393 2394
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;
S
Sujith 已提交
2395 2396 2397 2398 2399 2400 2401

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2402 2403
}

F
Felix Fietkau 已提交
2404 2405
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
2406
			     int txok)
2407
{
S
Sujith 已提交
2408
	struct sk_buff *skb = bf->bf_mpdu;
2409
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
S
Sujith 已提交
2410
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
F
Felix Fietkau 已提交
2411
	struct ieee80211_hw *hw = sc->hw;
2412
	struct ath_hw *ah = sc->sc_ah;
2413
	u8 i, tx_rateindex;
2414

S
Sujith 已提交
2415
	if (txok)
2416
		tx_info->status.ack_signal = ts->ts_rssi;
S
Sujith 已提交
2417

2418
	tx_rateindex = ts->ts_rateindex;
2419 2420
	WARN_ON(tx_rateindex >= hw->max_rates);

2421
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2422
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2423

2424
		BUG_ON(nbad > nframes);
2425
	}
2426 2427
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;
2428

2429
	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2430
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
2443 2444 2445
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
2446
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2447 2448
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
2449
	}
2450

2451
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2452
		tx_info->status.rates[i].count = 0;
2453 2454
		tx_info->status.rates[i].idx = -1;
	}
2455

2456
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2457 2458
}

S
Sujith 已提交
2459
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2460
{
2461
	struct ath_hw *ah = sc->sc_ah;
2462
	struct ath_common *common = ath9k_hw_common(ah);
S
Sujith 已提交
2463
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
2464
	struct list_head bf_head;
S
Sujith 已提交
2465
	struct ath_desc *ds;
2466
	struct ath_tx_status ts;
S
Sujith 已提交
2467
	int status;
2468

2469
	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
J
Joe Perches 已提交
2470 2471
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
2472

F
Felix Fietkau 已提交
2473
	ath_txq_lock(sc, txq);
2474
	for (;;) {
2475
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2476 2477
			break;

2478 2479
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
2480
			ath_txq_schedule(sc, txq);
2481 2482 2483 2484
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

S
Sujith 已提交
2485 2486 2487 2488 2489 2490 2491 2492 2493
		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
2494
		if (bf->bf_state.stale) {
S
Sujith 已提交
2495
			bf_held = bf;
2496
			if (list_is_last(&bf_held->list, &txq->axq_q))
S
Sujith 已提交
2497
				break;
2498 2499 2500

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
2501 2502 2503
		}

		lastbf = bf->bf_lastbf;
S
Sujith 已提交
2504
		ds = lastbf->bf_desc;
2505

2506 2507
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
2508
		if (status == -EINPROGRESS)
S
Sujith 已提交
2509
			break;
2510

2511
		TX_STAT_INC(txq->axq_qnum, txprocdesc);
2512

S
Sujith 已提交
2513 2514 2515 2516 2517
		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
2518
		lastbf->bf_state.stale = true;
S
Sujith 已提交
2519 2520 2521 2522
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);
2523

2524
		if (bf_held) {
2525 2526
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
S
Sujith 已提交
2527
		}
2528

2529
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2530
	}
F
Felix Fietkau 已提交
2531
	ath_txq_unlock_complete(sc, txq);
2532 2533
}

S
Sujith 已提交
2534
void ath_tx_tasklet(struct ath_softc *sc)
2535
{
2536 2537
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2538
	int i;
2539

S
Sujith 已提交
2540 2541 2542
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2543 2544 2545
	}
}

2546 2547
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
2548
	struct ath_tx_status ts;
2549 2550 2551 2552 2553
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
2554
	struct list_head *fifo_list;
2555 2556 2557
	int status;

	for (;;) {
2558
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2559 2560
			break;

2561
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
2562 2563 2564
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
2565
			ath_dbg(common, XMIT, "Error processing tx status\n");
2566 2567 2568
			break;
		}

2569 2570 2571 2572
		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2573

2574
			ath9k_csa_update(sc);
2575
			continue;
2576
		}
2577

2578
		txq = &sc->tx.txq[ts.qid];
2579

F
Felix Fietkau 已提交
2580
		ath_txq_lock(sc, txq);
2581

2582 2583
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

2584 2585
		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
F
Felix Fietkau 已提交
2586
			ath_txq_unlock(sc, txq);
2587 2588 2589
			return;
		}

2590
		bf = list_first_entry(fifo_list, struct ath_buf, list);
2591
		if (bf->bf_state.stale) {
2592 2593 2594 2595 2596
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

2597 2598 2599
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
2600 2601
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
2602
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2603

2604 2605
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;
2606

2607 2608 2609 2610 2611
				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
2612
		} else {
2613
			lastbf->bf_state.stale = true;
2614 2615 2616
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
2617
		}
2618

2619
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
F
Felix Fietkau 已提交
2620
		ath_txq_unlock_complete(sc, txq);
2621 2622 2623
	}
}

S
Sujith 已提交
2624 2625 2626
/*****************/
/* Init, Cleanup */
/*****************/
2627

2628 2629 2630 2631 2632 2633
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2634 2635
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up EDMA TX status handling: allocate the status ring and, when
 * that succeeds, point the hardware at it.  Returns 0 on success or the
 * allocation error code.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2655
int ath_tx_init(struct ath_softc *sc, int nbufs)
2656
{
2657
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2658
	int error = 0;
2659

2660
	spin_lock_init(&sc->tx.txbuflock);
2661

2662
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2663
				  "tx", nbufs, 1, 1);
2664
	if (error != 0) {
2665 2666
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2667
		return error;
2668
	}
2669

2670
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2671
				  "beacon", ATH_BCBUF, 1, 1);
2672
	if (error != 0) {
2673 2674
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2675
		return error;
2676
	}
2677

2678 2679
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2680
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2681
		error = ath_tx_edma_init(sc);
2682

S
Sujith 已提交
2683
	return error;
2684 2685 2686 2687
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2688 2689 2690
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2691

2692
	for (tidno = 0, tid = &an->tid[tidno];
2693
	     tidno < IEEE80211_NUM_TIDS;
2694 2695 2696 2697 2698 2699 2700
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2701
		tid->paused    = false;
2702
		tid->active	   = false;
2703
		__skb_queue_head_init(&tid->buf_q);
2704
		__skb_queue_head_init(&tid->retry_q);
2705
		acno = TID_TO_WME_AC(tidno);
2706
		tid->ac = &an->ac[acno];
2707
	}
2708

2709
	for (acno = 0, ac = &an->ac[acno];
2710
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2711
		ac->sched    = false;
2712
		ac->clear_ps_filter = true;
2713
		ac->txq = sc->tx.txq_map[acno];
2714
		INIT_LIST_HEAD(&ac->tid_q);
2715 2716 2717
	}
}

S
Sujith 已提交
2718
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2719
{
2720 2721
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
2722
	struct ath_txq *txq;
2723
	int tidno;
S
Sujith 已提交
2724

2725
	for (tidno = 0, tid = &an->tid[tidno];
2726
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2727

2728
		ac = tid->ac;
2729
		txq = ac->txq;
2730

F
Felix Fietkau 已提交
2731
		ath_txq_lock(sc, txq);
2732 2733 2734 2735 2736 2737 2738 2739 2740

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2741
		}
2742 2743

		ath_tid_drain(sc, txq, tid);
2744
		tid->active = false;
2745

F
Felix Fietkau 已提交
2746
		ath_txq_unlock(sc, txq);
2747 2748
	}
}
L
Luis R. Rodriguez 已提交
2749

2750 2751
#ifdef CONFIG_ATH9K_TX99

L
Luis R. Rodriguez 已提交
2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793
/*
 * Transmit a frame in TX99 continuous-transmit test mode: pad the
 * header, build a cleartext frame info, map the buffer and start the
 * hardware in TX99 mode.  Returns 0 on success or -EINVAL on failure.
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	/* Pad the 802.11 header out to a 4-byte boundary. */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT, "tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* TX99 frames are always sent in the clear. */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Link the descriptor to itself so the hardware loops forever. */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}
2794 2795

#endif /* CONFIG_ATH9K_TX99 */