xmit.c 69.5 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

/*
 * PHY timing helpers for 802.11n rate/duration math.
 *
 * All function-like macro arguments are fully parenthesized so that
 * callers may safely pass arbitrary expressions (e.g. `a + b`): the
 * original NUM_SYMBOLS_PER_USEC* variants expanded the raw argument
 * into `>>`, `*` and `-` expressions and mis-evaluated compound
 * arguments due to operator precedence.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
/* MCS index bits [6:3] encode (streams - 1) */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
/* Preamble/training field durations, in microseconds */
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */

/* Inverse of the above: microseconds -> symbol count */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)

#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)


38
/*
 * Data bits carried per OFDM symbol for a single spatial stream,
 * indexed by [mcs % 8][0 = 20 MHz, 1 = 40 MHz].  Multi-stream MCS
 * rates multiply the single-stream value by the stream count.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
/* Forward declarations for the TX-path helpers defined later in this file. */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

/*
 * Second index into sc->tx.max_aggr_framelen[queue][]: one entry per
 * HT channel-width / guard-interval combination.
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
/* Acquire the TX queue lock (bottom-half safe). */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

86
/* Release the TX queue lock without flushing the completion queue. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

92
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
93
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
94 95 96 97 98 99 100 101 102 103 104 105
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
106
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
107
{
S
Sujith 已提交
108
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
109

S
Sujith 已提交
110 111
	if (tid->sched)
		return;
S
Sujith 已提交
112

S
Sujith 已提交
113 114
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
115

S
Sujith 已提交
116 117
	if (ac->sched)
		return;
118

S
Sujith 已提交
119 120 121
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
122

123
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
124 125
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
126 127 128
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
129 130
}

131 132
/*
 * Ask mac80211 to transmit a BlockAckReq for this TID, moving the
 * receiver's BA window start to @seqno.  No-op when the node has no
 * associated station (e.g. internal frames).
 */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	struct ath_node *node = tid->an;

	if (!node->sta)
		return;

	ieee80211_send_bar(node->vif, node->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

140 141 142 143 144 145 146
/* Fill bf->rates with the rate series mac80211 selected for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168
/*
 * Account for one completed skb on its software queue: drop the
 * pending-frame counter and restart the mac80211 queue if it had been
 * stopped and has drained below the high-water mark.  Frames completed
 * on a queue other than their mapped one (except via the UAPSD queue,
 * which is redirected back) are not counted.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int qnum = skb_get_queue_mapping(skb);

	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[qnum];

	if (txq != sc->tx.txq_map[qnum])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[qnum]) {
		ieee80211_wake_queue(sc->hw, qnum);
		txq->stopped = false;
	}
}

169 170 171
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
172
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
173 174 175
	return ATH_AN_2_TID(an, tidno);
}

176 177
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
178
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
179 180 181 182
}

/* Pop the next buffered frame, preferring retries over fresh frames. */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *next = __skb_dequeue(&tid->retry_q);

	if (next)
		return next;

	return __skb_dequeue(&tid->buf_q);
}

192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226
/*
 * ath_tx_tid_change_state:
 * - clears the a-mpdu flag of the previous session on every buffered frame
 * - forces sequence number allocation (via ath_tx_setup_buffer) to fix
 *   the next BlockAck window
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	/* safe walk: entries may be unlinked when buffer setup fails */
	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* frame already has a buffer/sequence number assigned */
		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* no descriptor available - drop the frame */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}

}

227
/*
 * Drop every frame on the TID's retry queue, completing each with a
 * zeroed (failed) TX status.  Frames that were inside the BlockAck
 * window are removed from it; if any were, a BAR is sent afterwards so
 * the receiver moves its window forward.
 *
 * Called with the txq lock held; the lock is temporarily dropped
 * around ath_send_bar().
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->retry_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!bf) {
			/* never got a descriptor - just free the skb */
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		if (fi->baw_tracked) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			sendbar = true;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	if (sendbar) {
		/* ath_send_bar() must not run under the txq lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
265

S
Sujith 已提交
266 267
/*
 * Mark @seqno as completed in the TID's BlockAck window bitmap and
 * slide the window start past any leading run of completed slots,
 * adjusting the pending-BAR index accordingly.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	/* position of seqno relative to the window start, then the
	 * circular-buffer slot it occupies */
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* advance the window over completed (cleared) leading slots */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
283

S
Sujith 已提交
284
/*
 * Record @bf's sequence number as outstanding in the TID's BlockAck
 * window bitmap, extending the window tail if the frame lands beyond
 * it.  Also marks the frame as BAW-tracked so completion paths know to
 * call ath_tx_update_baw() for it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);
	fi->baw_tracked = 1;

	/* grow the tail when the new slot lies past the current window end */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

S
Sujith 已提交
303 304
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
305 306

{
307
	struct sk_buff *skb;
S
Sujith 已提交
308 309
	struct ath_buf *bf;
	struct list_head bf_head;
310
	struct ath_tx_status ts;
311
	struct ath_frame_info *fi;
312 313

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
314
	INIT_LIST_HEAD(&bf_head);
315

316
	while ((skb = ath_tid_dequeue(tid))) {
317 318
		fi = get_frame_info(skb);
		bf = fi->bf;
319

320 321 322 323 324
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

325
		list_add_tail(&bf->list, &bf_head);
326
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
327
	}
328 329
}

S
Sujith 已提交
330
/*
 * Bump the software retry count of @skb by @count.  On the first
 * retry, also set the 802.11 Retry bit in the frame header and sync
 * the modified header back to the device, since the frame data was
 * already DMA-mapped.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* Retry bit already set on an earlier retransmission */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	/* only the header changed, so only sync that much */
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

350
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
351
{
352
	struct ath_buf *bf = NULL;
S
Sujith 已提交
353 354

	spin_lock_bh(&sc->tx.txbuflock);
355 356

	if (unlikely(list_empty(&sc->tx.txbuf))) {
357 358 359
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
360 361 362 363

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
364 365
	spin_unlock_bh(&sc->tx.txbuflock);

366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
	return bf;
}

/* Put a descriptor buffer back on the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate @bf into a freshly allocated descriptor buffer so the
 * original (a held/stale descriptor) can be reclaimed by the hardware
 * while the frame is retransmitted.  The skb, DMA address, descriptor
 * contents and software state are all carried over; only the stale
 * flag is cleared.  Returns NULL if the buffer pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_state.stale = false;

	return tbf;
}

395 396 397 398
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
399
	struct ath_frame_info *fi;
400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
415
		fi = get_frame_info(bf->bf_mpdu);
416
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
417 418 419 420 421 422 423 424 425 426

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
427 428
/*
 * Completion handler for an A-MPDU (or software-retried subframe
 * chain).  Walks every subframe of @bf, using the BlockAck bitmap in
 * @ts to decide per subframe whether it was acked, should be software
 * retried, or has exhausted its retries.  Acked/failed frames are
 * completed and removed from the BlockAck window; retry candidates are
 * re-queued on the TID in order.  May send a BAR (with the txq lock
 * temporarily dropped) to move the receiver's window past permanently
 * failed frames, and may request a chip reset on AR5416 BA problems.
 *
 * Called with the txq lock held.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* total hardware retries across all attempted rate series */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* station went away - fail all subframes */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			/* stale last descriptors stay with the hardware */
			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			/* queue flush in progress - keep frame pending */
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* software retries exhausted - give up on it */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* ath_send_bar() must not run under the txq lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
652

653 654 655 656 657 658 659 660 661 662
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Common completion entry point for one descriptor chain: update queue
 * depth accounting, then dispatch to the plain-frame or aggregate
 * completion path.  Unless this is a flush, the queue scheduler is
 * kicked afterwards to keep traffic flowing.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			/* feed the final rate series back to rate control */
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

689 690 691 692 693 694 695 696 697 698 699
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

700 701 702 703
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

704 705 706 707 708 709 710
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
711 712
/*
 * Compute the byte limit for an aggregate containing @bf, based on the
 * slowest rate in its rate series (so the aggregate fits in the 4 ms /
 * TXOP-limited duration precomputed in sc->tx.max_aggr_framelen).
 * Returns 0 - meaning "do not aggregate" - for probe frames and for
 * series containing legacy rates.  The result is further capped by the
 * BTCOEX limit and the peer's ADDBA-advertised max A-MPDU size.
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* pick the precomputed table matching width and GI */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/* honor the receiver's advertised max A-MPDU length */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
777

S
Sujith 已提交
778
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	/* symbols that fit in the required inter-MPDU gap */
	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* pad short subframes with extra delimiters to reach minlen */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

852 853
/*
 * Fetch the next transmittable frame from the TID, preferring the
 * retry queue over fresh frames.  On return, *q points at the queue
 * the frame is still linked on (the caller unlinks it).  Frames that
 * cannot get a buffer are dropped; frames below a pending BAR index
 * are completed as failed; a frame beyond the BlockAck window stops
 * the scan.  Returns NULL when nothing is currently sendable.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		/* retries first, to preserve sequence order */
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		else
			bf->bf_state.stale = false;

		if (!bf) {
			/* no descriptor available - drop the frame */
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			/* non-AMPDU frame: send as-is */
			bf->bf_state.bf_type = 0;
			return bf;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			/* frame precedes a pending BAR: complete as failed */
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		return bf;
	}

	return NULL;
}

920 921 922 923 924
/*
 * Build one A-MPDU for @tid starting with @bf_first: chain subframes
 * onto @bf_q until the rate-derived byte limit, half the BlockAck
 * window, a rate-control probe, or a non-aggregatable frame stops it.
 * Each subframe is added to the BlockAck window and has its delimiter
 * count computed for the peer's MPDU-density requirement.  The total
 * aggregate length (excluding padding of the last subframe) is
 * returned through @aggr_len.  Returns true when the TID ran out of
 * transmittable frames (queue "closed").
 */
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	bool closed = false;

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	do {
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
			    ath_lookup_legacy(bf) || nframes >= h_baw)
				break;

			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
				break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
	} while (ath_tid_has_buffered(tid));

	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	if (bf == bf_prev) {
		/* only one subframe made it in - send as a lone AMPDU */
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

	*aggr_len = al;

	return closed;
#undef PADBYTES
}
1004

1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 total_bits, bits_per_sym, sym_count, usec;
	int stream_count;

	/* find number of symbols: PLCP + data */
	stream_count = HT_RC_2_STREAMS(rix);
	total_bits = (pktlen << 3) + OFDM_PLCP_BITS;
	bits_per_sym = bits_per_symbol[rix % 8][width] * stream_count;
	sym_count = (total_bits + bits_per_sym - 1) / bits_per_sym;

	usec = half_gi ? SYMBOL_TIME_HALFGI(sym_count)
		       : SYMBOL_TIME(sym_count);

	/* addup duration for legacy/ht training and signal fields */
	usec += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF +
		HT_LTF(stream_count);

	return usec;
}

1034 1035 1036 1037 1038 1039
/*
 * Invert ath_pkt_duration(): given a time budget in usec, return the
 * largest frame length (bytes) that fits at the given MCS/bandwidth/GI,
 * capped at 65532 bytes.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nstreams = HT_RC_2_STREAMS(mcs);
	int nsymbols, nbits, nbytes;

	/* subtract the preamble; the remainder carries data symbols */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nstreams);

	if (sgi)
		nsymbols = TIME_SYMBOLS_HALFGI(usec);
	else
		nsymbols = TIME_SYMBOLS(usec);

	nbits = nsymbols * bits_per_symbol[mcs % 8][ht40] * nstreams;
	nbits -= OFDM_PLCP_BITS;
	nbytes = nbits / 8;
	if (nbytes > 65532)
		nbytes = 65532;

	return nbytes;
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * 32 MCS indices, in each of the four HT20/HT40 x GI/SGI combinations,
 * from the queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *ht20, *ht20_sgi, *ht40, *ht40_sgi;
	int mcs;

	/* clamp to the 4ms default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];

	for (mcs = 0; mcs < 32; mcs++) {
		ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1072
/*
 * Fill the rate series of a TX descriptor from the mac80211 rate table
 * attached to the buffer (bf->rates): rate code, try count, RTS/CTS and
 * bandwidth/GI flags, chainmask and per-rate packet duration.
 *
 * @len: frame (or whole A-MPDU) length used for duration and RTS decisions
 * @rts: force RTS for all series (pre-computed by the caller for aggregates)
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* unused rate series entry */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		/* RTS takes priority over CTS-to-self protection */
		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC only applies to single-stream (MCS 0-7) rates */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames must use the full chainmask */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1179

1180 1181 1182 1183 1184 1185 1186 1187
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1188

1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1201 1202
}

1203 1204
/*
 * Program a hardware TX descriptor for every buffer in the bf->bf_next
 * chain. Rate series and per-burst flags are computed once, on the first
 * subframe of each aggregate; per-buffer fields (DMA address, length,
 * key, aggregate position) are filled for every descriptor.
 *
 * @len: total A-MPDU length for aggregates; overwritten with the single
 *       frame length for non-aggregated buffers
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* in tx99 mode the last descriptor links back to itself */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* mark this subframe's position within the aggregate */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* end of this aggregate: restart first-subframe handling */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330
/*
 * Form a short burst (at most two frames) of non-aggregated frames,
 * moving them from the tid queue onto bf_q and chaining them via
 * bf_next. Stops early when the tid runs dry or the next frame wants
 * A-MPDU aggregation.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *cur = bf_first, *prev = NULL;
	int count = 0;

	for (;;) {
		struct ieee80211_tx_info *tx_info;

		/* move the frame from the tid queue onto the burst list */
		__skb_unlink(cur->bf_mpdu, tid_q);
		list_add_tail(&cur->list, bf_q);
		if (prev)
			prev->bf_next = cur;
		prev = cur;

		if (++count >= 2)
			break;

		cur = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!cur)
			break;

		/* an aggregation-eligible frame ends the burst */
		tx_info = IEEE80211_SKB_CB(cur->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, cur);
	}
}

1331 1332
/*
 * Pull frames from a tid and push them to the hardware queue, either as
 * an A-MPDU aggregate or as a short burst of unaggregated frames.
 *
 * Returns true if anything was queued to the hardware. Sets *stop when
 * the hardware queue is already deep enough and scheduling should pause.
 *
 * Fix: the local 'last' only captured ath_tx_form_aggr()'s return value
 * and was never read afterwards — dead local removed.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	/* don't overfeed the hardware queue; ask the scheduler to pause */
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
				 tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* propagate a pending PS filter clear onto the first frame */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1378 1379
/*
 * mac80211 AMPDU start handler: initialize TX aggregation state for a
 * station/TID and report the starting sequence number via *ssn.
 * Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the block-ack window tracking state */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1417

1418
/*
 * mac80211 AMPDU stop handler: mark the TID inactive, flush its pending
 * frames and update its state, all under the queue lock.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an;
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1430

1431 1432
/*
 * Station entered powersave: unschedule every TID (and its access
 * category) of the node and tell mac80211 which TIDs still hold
 * buffered frames.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* TID not on the scheduler queue: nothing to unschedule */
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

		/* sample before unscheduling; reported to mac80211 below */
		buffered = ath_tid_has_buffered(tid);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1477
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1478 1479 1480 1481

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1482
		ath_txq_lock(sc, txq);
1483 1484
		ac->clear_ps_filter = true;

F
Felix Fietkau 已提交
1485
		if (ath_tid_has_buffered(tid)) {
1486 1487 1488 1489
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

F
Felix Fietkau 已提交
1490
		ath_txq_unlock_complete(sc, txq);
1491 1492 1493
	}
}

1494 1495
/*
 * Resume TX aggregation on a TID: refresh the BlockAck window size from
 * the station's HT capabilities and kick the queue if frames are
 * pending.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *tid = ATH_AN_2_TID(an, tidno);
	struct ath_txq *txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528
/*
 * mac80211 callback: release up to @nframes buffered frames for the
 * TIDs in the @tids bitmap onto the UAPSD queue. The last released
 * frame is tagged with EOSP (end of service period).
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	/* walk the TID bitmap, lowest bit = TID 0 */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			/* move the frame onto the release chain */
			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* mark the last released frame as end-of-service-period */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1582 1583 1584
/********************/
/* Queue Management */
/********************/
1585

S
Sujith 已提交
1586
/*
 * Allocate and initialize a hardware TX queue of the given type/subtype.
 * Returns a pointer into sc->tx.txq[], or NULL if the hardware has no
 * queue to spare (normal on parts with few TX queues).
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* map mac80211 access categories to hardware queue subtypes */
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* first-time setup of this software queue's bookkeeping */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1659 1660 1661
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst/ready time) to a
 * hardware TX queue and reset it. Returns 0 on success, -EIO if the
 * hardware rejected the new properties.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* start from the current properties, override only EDCA fields */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
1689
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
S
Sujith 已提交
1690
	int qnum = sc->beacon.cabq->axq_qnum;
1691

S
Sujith 已提交
1692
	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1693

1694
	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
1695
			    ATH_CABQ_READY_TIME) / 100;
S
Sujith 已提交
1696 1697 1698
	ath_txq_update(sc, qnum, &qi);

	return 0;
1699 1700
}

1701
/*
 * Flush every buffer on @list, completing each frame chain with a
 * synthetic ATH9K_TX_FLUSH status. Stale buffers are simply returned
 * to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* stale buffers carry no frame to complete */
		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* detach the whole frame chain and complete it at once */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1727

1728 1729 1730 1731 1732 1733
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA parts keep per-queue FIFO lists that must be flushed too */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1756
/*
 * Abort TX DMA and drain every configured hardware queue.
 * Returns true if all queues stopped cleanly, false if some still had
 * frames pending in hardware after the abort.
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* device not operational: nothing to drain */
	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1800

S
Sujith 已提交
1801
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1802
{
S
Sujith 已提交
1803 1804
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1805
}
1806

1807 1808 1809
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	bool sent = false;

	/* don't schedule during a hardware reset or with nothing queued */
	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(&txq->axq_acq))
		return;

	rcu_read_lock();

	/* remember the tail entries so one full round-robin pass is bounded */
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
	while (!list_empty(&txq->axq_acq)) {
		bool stop = false;

		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* requeue the AC if it still has unscheduled TIDs */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (stop)
			break;

		/* end of a full pass: stop unless something was sent */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(txq->axq_acq.prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
}
1873

S
Sujith 已提交
1874 1875 1876 1877
/***********/
/* TX, DMA */
/***********/

1878
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * @internal: when true, the buffers are being requeued and the queue
 *            depth counters are not touched.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA with a free FIFO slot: push into the FIFO directly */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		/* legacy path (or full FIFO): append to the descriptor list */
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* chain onto the previous tail descriptor */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		/* count each frame chain and unlink the chains from each other */
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1948

F
Felix Fietkau 已提交
1949
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1950
			       struct ath_atx_tid *tid, struct sk_buff *skb)
S
Sujith 已提交
1951
{
1952
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1953 1954
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
1955
	struct ath_buf *bf = fi->bf;
1956 1957 1958

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
1959
	bf->bf_state.bf_type = 0;
1960 1961 1962 1963
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}
S
Sujith 已提交
1964

1965
	bf->bf_next = NULL;
S
Sujith 已提交
1966
	bf->bf_lastbf = bf;
1967
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1968
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
S
Sujith 已提交
1969
	TX_STAT_INC(txq->axq_qnum, queued);
S
Sujith 已提交
1970 1971
}

1972 1973 1974
/*
 * Initialize the per-frame driver info (ath_frame_info) stored in the
 * skb: key index/type, frame length and the RTS/CTS rate code.
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	/* pick the key slot: explicit hw key, else the node's PS key */
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;

	/* no RTS/CTS rate available (e.g. no rate control info) */
	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

2018 2019 2020 2021
/*
 * Reduce the TX chainmask for rate/hardware combinations that cannot
 * use the full mask; otherwise return the mask unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	/* APM parts on 5 GHz: drop from three chains to two below 0x90 */
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	/* AR9462 with BT coex enabled: CCK rates use a single chain */
	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2033 2034 2035 2036
/*
 * Assign a descriptor (and sequence number if necessary),
 * and map buffer for DMA.
 *
 * Returns NULL on failure (no free buffer, or DMA mapping error).
 * NOTE(review): the skb itself is not freed here on failure —
 * presumably the caller owns it; confirm against call sites.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	/* allocate the next sequence number from the tid for data frames */
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		/* preserve the fragment number of fragmented frames */
		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* all fragments of an MSDU share one sequence number */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2089 2090
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
2091
{
2092 2093
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2094
	struct ieee80211_sta *sta = txctl->sta;
2095
	struct ieee80211_vif *vif = info->control.vif;
2096
	struct ath_vif *avp;
2097
	struct ath_softc *sc = hw->priv;
F
Felix Fietkau 已提交
2098
	int frmlen = skb->len + FCS_LEN;
2099
	int padpos, padsize;
2100

2101 2102 2103
	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
2104 2105 2106 2107
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}
2108

F
Felix Fietkau 已提交
2109 2110 2111
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

2112
	/*
S
Sujith 已提交
2113 2114 2115
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
2116
	 */
S
Sujith 已提交
2117 2118 2119 2120 2121
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2122 2123
	}

2124 2125 2126 2127 2128
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

2129
	/* Add the padding after the header if this is not already done */
2130
	padpos = ieee80211_hdrlen(hdr->frame_control);
2131 2132 2133 2134
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;
2135

2136 2137
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
2138 2139
	}

2140
	setup_frame_info(hw, sta, skb, frmlen);
2141 2142 2143
	return 0;
}

2144

2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
2165 2166 2167 2168 2169
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

2170
	q = skb_get_queue_mapping(skb);
F
Felix Fietkau 已提交
2171 2172

	ath_txq_lock(sc, txq);
2173
	if (txq == sc->tx.txq_map[q] &&
2174 2175
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
2176
		ieee80211_stop_queue(sc->hw, q);
2177
		txq->stopped = true;
2178 2179
	}

2180
	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
2181 2182
		tid = ath_get_skb_tid(sc, txctl->an, skb);

2183 2184 2185 2186
	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
2187 2188
	} else if (txctl->an &&
		   ieee80211_is_data_present(hdr->frame_control)) {
2189 2190
		WARN_ON(tid->ac->txq != txctl->txq);

2191 2192 2193
		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

2194
		/*
2195 2196
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
2197
		 */
2198 2199 2200 2201 2202 2203
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(txq, tid);

		ath_txq_schedule(sc, txq);
2204 2205 2206
		goto out;
	}

2207
	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2208
	if (!bf) {
2209
		ath_txq_skb_done(sc, txq, skb);
2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

2222
	ath_set_rates(vif, sta, bf);
2223
	ath_tx_send_normal(sc, txq, tid, skb);
F
Felix Fietkau 已提交
2224

2225
out:
F
Felix Fietkau 已提交
2226
	ath_txq_unlock(sc, txq);
F
Felix Fietkau 已提交
2227

2228
	return 0;
2229 2230
}

2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2262
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2299 2300 2301
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2302

S
Sujith 已提交
2303
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2304
			    int tx_flags, struct ath_txq *txq)
S
Sujith 已提交
2305
{
S
Sujith 已提交
2306
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2307
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2308
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2309
	int padpos, padsize;
S
Sujith Manoharan 已提交
2310
	unsigned long flags;
S
Sujith 已提交
2311

2312
	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
S
Sujith 已提交
2313

2314
	if (sc->sc_ah->caldata)
2315
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2316

2317
	if (!(tx_flags & ATH_TX_ERROR))
S
Sujith 已提交
2318 2319
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
S
Sujith 已提交
2320

2321
	padpos = ieee80211_hdrlen(hdr->frame_control);
2322 2323 2324 2325 2326 2327 2328 2329
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
S
Sujith 已提交
2330
	}
S
Sujith 已提交
2331

S
Sujith Manoharan 已提交
2332
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
2333
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
S
Sujith 已提交
2334
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2335
		ath_dbg(common, PS,
J
Joe Perches 已提交
2336
			"Going back to sleep after having received TX status (0x%lx)\n",
S
Sujith 已提交
2337 2338 2339 2340
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
2341
	}
S
Sujith Manoharan 已提交
2342
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2343

2344
	__skb_queue_tail(&txq->complete_q, skb);
2345
	ath_txq_skb_done(sc, txq, skb);
S
Sujith 已提交
2346
}
2347

S
Sujith 已提交
2348
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2349
				struct ath_txq *txq, struct list_head *bf_q,
2350
				struct ath_tx_status *ts, int txok)
2351
{
S
Sujith 已提交
2352
	struct sk_buff *skb = bf->bf_mpdu;
2353
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
S
Sujith 已提交
2354
	unsigned long flags;
2355
	int tx_flags = 0;
2356

2357
	if (!txok)
2358
		tx_flags |= ATH_TX_ERROR;
2359

2360 2361 2362
	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

B
Ben Greear 已提交
2363
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2364
	bf->bf_buf_addr = 0;
L
Luis R. Rodriguez 已提交
2365 2366
	if (sc->tx99_state)
		goto skip_tx_complete;
2367 2368

	if (bf->bf_state.bfs_paprd) {
2369 2370 2371
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2372
			dev_kfree_skb_any(skb);
2373
		else
2374
			complete(&sc->paprd_complete);
2375
	} else {
2376
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2377
		ath_tx_complete(sc, skb, tx_flags, txq);
2378
	}
L
Luis R. Rodriguez 已提交
2379
skip_tx_complete:
2380 2381 2382 2383
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;
S
Sujith 已提交
2384 2385 2386 2387 2388 2389 2390

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2391 2392
}

F
Felix Fietkau 已提交
2393 2394
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
2395
			     int txok)
2396
{
S
Sujith 已提交
2397
	struct sk_buff *skb = bf->bf_mpdu;
2398
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
S
Sujith 已提交
2399
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
F
Felix Fietkau 已提交
2400
	struct ieee80211_hw *hw = sc->hw;
2401
	struct ath_hw *ah = sc->sc_ah;
2402
	u8 i, tx_rateindex;
2403

S
Sujith 已提交
2404
	if (txok)
2405
		tx_info->status.ack_signal = ts->ts_rssi;
S
Sujith 已提交
2406

2407
	tx_rateindex = ts->ts_rateindex;
2408 2409
	WARN_ON(tx_rateindex >= hw->max_rates);

2410
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2411
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2412

2413
		BUG_ON(nbad > nframes);
2414
	}
2415 2416
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;
2417

2418
	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2419
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
2432 2433 2434
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
2435
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2436 2437
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
2438
	}
2439

2440
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2441
		tx_info->status.rates[i].count = 0;
2442 2443
		tx_info->status.rates[i].idx = -1;
	}
2444

2445
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2446 2447
}

S
Sujith 已提交
2448
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2449
{
2450
	struct ath_hw *ah = sc->sc_ah;
2451
	struct ath_common *common = ath9k_hw_common(ah);
S
Sujith 已提交
2452
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
2453
	struct list_head bf_head;
S
Sujith 已提交
2454
	struct ath_desc *ds;
2455
	struct ath_tx_status ts;
S
Sujith 已提交
2456
	int status;
2457

2458
	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
J
Joe Perches 已提交
2459 2460
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
2461

F
Felix Fietkau 已提交
2462
	ath_txq_lock(sc, txq);
2463
	for (;;) {
2464
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2465 2466
			break;

2467 2468
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
2469
			ath_txq_schedule(sc, txq);
2470 2471 2472 2473
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

S
Sujith 已提交
2474 2475 2476 2477 2478 2479 2480 2481 2482
		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
2483
		if (bf->bf_state.stale) {
S
Sujith 已提交
2484
			bf_held = bf;
2485
			if (list_is_last(&bf_held->list, &txq->axq_q))
S
Sujith 已提交
2486
				break;
2487 2488 2489

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
2490 2491 2492
		}

		lastbf = bf->bf_lastbf;
S
Sujith 已提交
2493
		ds = lastbf->bf_desc;
2494

2495 2496
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
2497
		if (status == -EINPROGRESS)
S
Sujith 已提交
2498
			break;
2499

2500
		TX_STAT_INC(txq->axq_qnum, txprocdesc);
2501

S
Sujith 已提交
2502 2503 2504 2505 2506
		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
2507
		lastbf->bf_state.stale = true;
S
Sujith 已提交
2508 2509 2510 2511
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);
2512

2513
		if (bf_held) {
2514 2515
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
S
Sujith 已提交
2516
		}
2517

2518
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2519
	}
F
Felix Fietkau 已提交
2520
	ath_txq_unlock_complete(sc, txq);
2521 2522
}

S
Sujith 已提交
2523
void ath_tx_tasklet(struct ath_softc *sc)
2524
{
2525 2526
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2527
	int i;
2528

S
Sujith 已提交
2529 2530 2531
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2532 2533 2534
	}
}

2535 2536
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
2537
	struct ath_tx_status ts;
2538 2539 2540 2541 2542
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
2543
	struct list_head *fifo_list;
2544 2545 2546
	int status;

	for (;;) {
2547
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2548 2549
			break;

2550
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
2551 2552 2553
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
2554
			ath_dbg(common, XMIT, "Error processing tx status\n");
2555 2556 2557
			break;
		}

2558 2559 2560 2561
		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2562

2563
			ath9k_csa_update(sc);
2564
			continue;
2565
		}
2566

2567
		txq = &sc->tx.txq[ts.qid];
2568

F
Felix Fietkau 已提交
2569
		ath_txq_lock(sc, txq);
2570

2571 2572
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

2573 2574
		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
F
Felix Fietkau 已提交
2575
			ath_txq_unlock(sc, txq);
2576 2577 2578
			return;
		}

2579
		bf = list_first_entry(fifo_list, struct ath_buf, list);
2580
		if (bf->bf_state.stale) {
2581 2582 2583 2584 2585
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

2586 2587 2588
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
2589 2590
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
2591
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2592

2593 2594
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;
2595

2596 2597 2598 2599 2600
				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
2601
		} else {
2602
			lastbf->bf_state.stale = true;
2603 2604 2605
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
2606
		}
2607

2608
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
F
Felix Fietkau 已提交
2609
		ath_txq_unlock_complete(sc, txq);
2610 2611 2612
	}
}

S
Sujith 已提交
2613 2614 2615
/*****************/
/* Init, Cleanup */
/*****************/
2616

2617 2618 2619 2620 2621 2622
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2623 2624
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/* Allocate the EDMA tx status ring and point the hardware at it. */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (err)
		return err;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2644
int ath_tx_init(struct ath_softc *sc, int nbufs)
2645
{
2646
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2647
	int error = 0;
2648

2649
	spin_lock_init(&sc->tx.txbuflock);
2650

2651
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2652
				  "tx", nbufs, 1, 1);
2653
	if (error != 0) {
2654 2655
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2656
		return error;
2657
	}
2658

2659
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2660
				  "beacon", ATH_BCBUF, 1, 1);
2661
	if (error != 0) {
2662 2663
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2664
		return error;
2665
	}
2666

2667 2668
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2669
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2670
		error = ath_tx_edma_init(sc);
2671

S
Sujith 已提交
2672
	return error;
2673 2674 2675 2676
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2677 2678 2679
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2680

2681
	for (tidno = 0, tid = &an->tid[tidno];
2682
	     tidno < IEEE80211_NUM_TIDS;
2683 2684 2685 2686 2687 2688 2689
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
2690
		tid->active	   = false;
2691
		__skb_queue_head_init(&tid->buf_q);
2692
		__skb_queue_head_init(&tid->retry_q);
2693
		acno = TID_TO_WME_AC(tidno);
2694
		tid->ac = &an->ac[acno];
2695
	}
2696

2697
	for (acno = 0, ac = &an->ac[acno];
2698
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2699
		ac->sched    = false;
2700
		ac->clear_ps_filter = true;
2701
		ac->txq = sc->tx.txq_map[acno];
2702
		INIT_LIST_HEAD(&ac->tid_q);
2703 2704 2705
	}
}

S
Sujith 已提交
2706
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2707
{
2708 2709
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
2710
	struct ath_txq *txq;
2711
	int tidno;
S
Sujith 已提交
2712

2713
	for (tidno = 0, tid = &an->tid[tidno];
2714
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2715

2716
		ac = tid->ac;
2717
		txq = ac->txq;
2718

F
Felix Fietkau 已提交
2719
		ath_txq_lock(sc, txq);
2720 2721 2722 2723 2724 2725 2726 2727 2728

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2729
		}
2730 2731

		ath_tid_drain(sc, txq, tid);
2732
		tid->active = false;
2733

F
Felix Fietkau 已提交
2734
		ath_txq_unlock(sc, txq);
2735 2736
	}
}
L
Luis R. Rodriguez 已提交
2737

2738 2739
#ifdef CONFIG_ATH9K_TX99

L
Luis R. Rodriguez 已提交
2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781
/*
 * Inject a frame for TX99 (continuous-transmit test) mode: pad the
 * header to the 4-byte boundary the hardware expects, fill minimal
 * per-frame info (no crypto), map the buffer, and kick the hardware's
 * tx99 engine on the target queue.
 *
 * Returns 0 on success or -EINVAL when padding or buffer setup fails;
 * the caller keeps ownership of the skb on failure.
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			/* was mis-indented to look unconditional; it only
			 * runs when there is no headroom for the padding */
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames are always sent in the clear */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* self-linked descriptor keeps the hardware transmitting forever */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}
2782 2783

#endif /* CONFIG_ATH9K_TX99 */