xmit.c 67.8 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
/* Number of spatial streams encoded in an HT MCS index (bits 3-6). */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
/* 802.11n preamble field durations, in microseconds. */
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
/* Symbol-count <-> microsecond conversions for full and half guard interval. */
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
/* Data bits carried per OFDM symbol for MCS 0-7, indexed [mcs][40MHz?]. */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

/* Bit 7 set in a hardware rate code marks an HT (MCS) rate. */
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
52
/* Forward declarations for helpers defined later in this file. */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);
70

71
/*
 * Index into sc->tx.max_aggr_framelen[queue][] — one table per
 * bandwidth / guard-interval combination.
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
78 79 80
/*********************/
/* Aggregation logic */
/*********************/
81

82
/* Take the TX queue lock (BH-disabling); __acquires is a sparse annotation. */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

88
/* Release the TX queue lock taken by ath_txq_lock(). */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

94
/*
 * Drop the TX queue lock, then report TX status to mac80211 for every
 * frame queued on txq->complete_q. The frames are spliced to a local
 * list first so ieee80211_tx_status() is called without the lock held.
 */
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
108
/*
 * Schedule a TID for transmission: link it onto its access category's
 * tid_q, and link the AC onto the txq's scheduling list. No-op if the
 * TID is paused or either entity is already scheduled.
 */
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
127

128
/*
 * Per-frame driver state is stored in the skb's mac80211 rate_driver_data
 * scratch area; the BUILD_BUG_ON guarantees it fits.
 */
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

136 137 138 139 140 141
/* Ask mac80211 to send a BlockAckReq for this TID starting at seqno. */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

142 143 144 145 146 147 148
/* Populate bf->rates from mac80211's rate-control tables for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
/*
 * Account for one completed frame on the mac80211 queue that owns the
 * skb, and wake that queue if it was stopped and has drained below the
 * per-queue pending-frame threshold. UAPSD completions are mapped back
 * to the frame's original queue; completions on a foreign txq are ignored.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int qnum = skb_get_queue_mapping(skb);

	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[qnum];

	if (txq != sc->tx.txq_map[qnum])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (!txq->stopped)
		return;

	if (txq->pending_frames >= sc->tx.txq_max_pending[qnum])
		return;

	ieee80211_wake_queue(sc->hw, qnum);
	txq->stopped = false;
}

171 172 173 174 175 176 177 178 179 180
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
	return !skb_queue_empty(&tid->buf_q);
}

/* Pop the next buffered frame from the TID's software queue (or NULL). */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *next = __skb_dequeue(&tid->buf_q);

	return next;
}

181
/*
 * Drain a TID's software queue. Frames already retried are completed
 * with failure status and removed from the BA window (a BAR is sent
 * afterwards to move the receiver's window); untouched frames are sent
 * out as normal (non-aggregated) frames. Called with the txq lock held;
 * the lock is dropped around ath_send_bar().
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = ath_tid_dequeue(tid))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* Frames queued before a descriptor was assigned get one now. */
		if (!bf) {
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
			if (!bf) {
				ath_txq_skb_done(sc, txq, skb);
				ieee80211_free_txskb(sc->hw, skb);
				continue;
			}
		}

		if (fi->retries) {
			/* Already in the BA window: fail it and free the slot. */
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (sendbar) {
		/* ieee80211_send_bar() must not run under the txq lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
225

S
Sujith 已提交
226 227
/*
 * Mark seqno as completed in the TID's block-ack window bitmap and
 * slide the window start (seq_start/baw_head) forward past any leading
 * run of completed slots. bar_index tracks a pending BAR position and
 * moves with the window.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
243

S
Sujith 已提交
244
/*
 * Add seqno to the TID's block-ack window bitmap, extending the window
 * tail (baw_tail) when the new entry lands at or beyond it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
/*
 * Discard everything buffered on a TID: each frame is completed with
 * error status and removed from the BA window, then the window state
 * is reset so seq_next restarts at seq_start.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = ath_tid_dequeue(tid))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* No descriptor assigned yet: just report the error. */
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

S
Sujith 已提交
299
/*
 * Bump the software retry count for a frame. On the first retry, also
 * set the IEEE 802.11 retry flag in the frame header and sync the
 * modified header back to the DMA buffer the hardware reads from.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* The retry bit only needs to be set once. */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

319
/*
 * Take a TX descriptor buffer from the free pool, or NULL if the pool
 * is exhausted. Pool access is serialized by txbuflock.
 */
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

/* Return a TX descriptor buffer to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate a tx buffer so a stale holding descriptor can be retried:
 * the clone shares the skb and DMA mapping but gets a fresh descriptor
 * copy and the same software state. Returns NULL (with a WARN) if the
 * free pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

363 364 365 366
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
367
	struct ath_frame_info *fi;
368 369 370 371 372 373 374 375 376 377 378 379 380 381 382
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
383
		fi = get_frame_info(bf->bf_mpdu);
384
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
385 386 387 388 389 390 391 392 393 394

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
395 396
/*
 * Process the tx status of a completed aggregate: classify each
 * subframe as acked, failed, or pending retry; update the block-ack
 * window; report rate-control feedback once per status; requeue
 * pending subframes in order; and send a BAR if any subframe was
 * dropped. Called with the txq lock held (dropped around
 * ath_send_bar()). The station lookup is RCU-protected.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* Total transmit attempts across all tried rate series. */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		/* Station is gone: fail every subframe in the chain. */
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* Retry budget exhausted: drop and move BAR forward. */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			/* Rate-control feedback only once per tx status. */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* ieee80211_send_bar() must not run under the txq lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
621

622 623 624 625 626 627 628 629 630 631
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Common completion path for one reaped tx buffer: update queue depth
 * accounting, then dispatch to the normal or aggregate completion
 * handler. Rate-control feedback is skipped for flushed frames, and
 * the queue is rescheduled afterwards on HT-capable hardware.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
		ath_txq_schedule(sc, txq);
}

658 659 660 661 662 663 664 665 666 667 668
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

669 670 671 672
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

673 674 675 676 677 678 679
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
680 681
/*
 * Compute the maximum aggregate length (in bytes) for a frame, bounded
 * by the 4ms/TXOP frame-length tables for each tried rate, the BTCOEX
 * override, and the peer's advertised max A-MPDU size. Returns 0 when
 * aggregation should be avoided (probe frame or legacy rate in series).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		/* SGI variants sit directly after the full-GI entries. */
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
751

S
Sujith 已提交
752
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Pad short subframes up to the density-derived minimum length. */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

826 827
/*
 * Fetch the next transmittable frame from a TID's software queue,
 * assigning a descriptor on demand and dropping frames that cannot get
 * one or that fall behind a pending BAR. Returns NULL when the queue
 * is empty or the next frame would step over the BA window; *q is set
 * to the queue the returned skb is still linked on.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		*q = &tid->buf_q;
		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		/* No descriptor available: drop the frame and keep going. */
		if (!bf) {
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		/* Frames behind a pending BAR position are failed outright. */
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;
		return bf;
	}

	return NULL;
}

/*
 * Build one A-MPDU from a TID's software queue onto bf_q, linking the
 * subframes via bf_next and accounting padding/delimiters into
 * *aggr_len. Stops at the BA window edge, the rate-derived length
 * limit, the subframe limit, or a rate-control probe frame.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct sk_buff_head *tid_q;

	do {
		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		if (!bf_first)
			bf_first = bf;

		/* Rate lookup once, on the first subframe. */
		if (!rl) {
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (ath_tid_has_buffered(tid));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
969

970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 *
 * Returns the on-air duration of an HT frame: PLCP training/signal
 * fields plus the data symbols needed for pktlen at the given MCS.
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 bits, bits_per_sym, symbols, usec;
	int nss;

	/* number of data symbols: PLCP overhead + payload, rounded up */
	nss = HT_RC_2_STREAMS(rix);
	bits = (pktlen << 3) + OFDM_PLCP_BITS;
	bits_per_sym = bits_per_symbol[rix % 8][width] * nss;
	symbols = (bits + bits_per_sym - 1) / bits_per_sym;

	usec = half_gi ? SYMBOL_TIME_HALFGI(symbols) : SYMBOL_TIME(symbols);

	/* add the legacy/HT training and signal field durations */
	usec += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);

	return usec;
}

999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
/*
 * Inverse of the duration calculation: the largest frame (in bytes,
 * capped at 65532) that fits in `usec` microseconds at the given MCS,
 * bandwidth and guard interval.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nstreams = HT_RC_2_STREAMS(mcs);
	int nsymbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	int nbits = nsymbols * bits_per_symbol[mcs % 8][ht40] * nstreams;
	int nbytes;

	nbits -= OFDM_PLCP_BITS;
	nbytes = nbits / 8;
	nbytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nstreams);

	return min(nbytes, 65532);
}

void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
	for (mcs = 0; mcs < 32; mcs++) {
		cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1037
/*
 * Fill in the rate series of a TX descriptor from the mac80211 rate table
 * attached to the buffer: rate code, try count, RTS/CTS protection flags,
 * channel width/GI flags, chainmask selection and packet duration for each
 * of the (up to 4) rate attempts.
 *
 * len - frame (or whole aggregate) length used for duration/RTS decisions
 * rts - force RTS protection on every series (set by the caller for the
 *       first subframe of an aggregate that crosses the RTS threshold)
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip unused rate slots */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 * (mac80211 leaves this decision to the driver for MCS rates;
		 * rts_thresh == (u32)-1 means "RTS disabled".)
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: 0x80 marks the rate code as HT */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC only applies to single-stream (MCS 0-7) rates */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames must go out on all chains */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1143

1144 1145 1146 1147 1148 1149 1150 1151
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1152

1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1165 1166
}

1167 1168
/*
 * Walk a chain of ath_bufs (linked via bf_next) and program a hardware TX
 * descriptor for each one. Rate series and protection flags are computed
 * once per frame/aggregate on the first subframe; subsequent subframes
 * only get buffer, key and aggregation-position fields.
 *
 * len - total length of the aggregate (or the single frame)
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* chain descriptors; 0 terminates the DMA list */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		if (!bf_first) {
			bf_first = bf;

			info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}
			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* mark the subframe's position inside the A-MPDU */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* end of this aggregate: the next buffer starts a new one */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

S
Sujith 已提交
1255 1256 1257
/*
 * Pull buffered frames off a TID's software queue, form aggregates and
 * hand them to the hardware queue, until the queue is deep enough or the
 * block-ack window closes.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (!ath_tid_has_buffered(tid))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		/* propagate the one-shot PS-filter-clear request */
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

1304 1305
/*
 * mac80211 ADDBA (TX aggregation session start) handler: refresh the
 * station's A-MPDU parameters, reset the TID's block-ack window state
 * and report the starting sequence number back via *ssn.
 *
 * Returns 0 (the session is always accepted here).
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* paused until mac80211 completes the ADDBA handshake (resume path) */
	txtid->active = true;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* fresh, empty block-ack window */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
1335

1336
/*
 * mac80211 DELBA (TX aggregation session stop) handler: deactivate the
 * TID and flush any software-queued frames under the queue lock.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an;
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	/* stop accepting new subframes, then flush what is pending */
	txtid->paused = true;
	txtid->active = false;
	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1348

1349 1350
/*
 * Station entered powersave: pull every scheduled TID (and its AC) off
 * the TX scheduler lists and tell mac80211 which TIDs still have frames
 * buffered in the driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		/* only TIDs currently on the scheduler need unlinking */
		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		buffered = ath_tid_has_buffered(tid);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		/* must be called without the txq lock held */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station woke up from powersave: request a PS-filter clear on each AC
 * and reschedule every unpaused TID that still has buffered frames.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		/* consumed by ath_tx_sched_aggr() on the next aggregate */
		ac->clear_ps_filter = true;

		if (!tid->paused && ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1410 1411
/*
 * ADDBA handshake completed: unpause the TID, set the negotiated
 * block-ack window size and kick the scheduler if frames are waiting.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	/* BAW size scales with the peer's advertised A-MPDU factor */
	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	tid->paused = false;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
/*
 * mac80211 U-APSD service-period handler: release up to "nframes" frames
 * from the requested TIDs onto the dedicated U-APSD hardware queue and
 * mark the last released frame as end-of-service-period (EOSP).
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	/* iterate the TID bitmask, lowest TID first */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);
		if (tid->paused)
			continue;

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			/* move the frame from the TID queue into our chain */
			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
			bf->bf_state.bf_type &= ~BUF_AGGR;
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (!ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	/* nothing released: no EOSP frame to send */
	if (list_empty(&bf_q))
		return;

	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1499 1500 1501
/********************/
/* Queue Management */
/********************/
1502

S
Sujith 已提交
1503
/*
 * Allocate and initialize a hardware TX queue of the given type/subtype.
 * Returns a pointer to the driver's queue state, or NULL when the hardware
 * has no free queue (normal on parts with few TX queues).
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* initialize driver-side state only on first setup of this queue */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1576 1577 1578
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst and ready time)
 * for one hardware queue down to the hardware and reset the queue.
 *
 * Returns 0 on success, -EIO if the hardware rejected the parameters.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read-modify-write: keep fields not covered by qinfo */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Recompute the CAB (content-after-beacon) queue ready time as a
 * percentage of the beacon interval and apply it. Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1625
/*
 * Complete every buffer on the given descriptor list with a FLUSH status,
 * returning stale (already-reaped) buffers straight to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* stale entries were already completed; just recycle them */
		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* complete the whole frame/aggregate in one go */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1651

1652 1653 1654 1655 1656 1657
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA chips additionally keep per-queue FIFO lists to drain */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1680
/*
 * Abort TX DMA and drain every configured hardware queue.
 *
 * Returns true when all queues stopped cleanly, false if any queue still
 * had pending DMA after the abort (an error is logged in that case).
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* device already torn down: nothing to drain */
	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1721

S
Sujith 已提交
1722
/*
 * Release a hardware TX queue back to the hardware and clear its bit in
 * the driver's queue-setup mask.
 */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
1727

1728 1729 1730
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	/* skip scheduling during a reset or when the queue is deep enough */
	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
	    list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	rcu_read_lock();

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	/* remember the current tails so one pass covers each entry once */
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		/* requeue the AC if any of its TIDs still have work */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			break;
	}

	rcu_read_unlock();
}
1786

S
Sujith 已提交
1787 1788 1789 1790
/***********/
/* TX, DMA */
/***********/

1791
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * internal - true when requeueing buffers already counted in the queue
 *            depth (depth counters are then left untouched).
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	/* EDMA: start a new FIFO slot if the current one is free */
	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		/* link onto the tail of the running DMA chain, if any */
		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		/* count one depth unit per frame/aggregate, not per subframe */
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf = bf->bf_lastbf->bf_next;
		}
	}
}
1859

1860 1861 1862
/*
 * Queue a frame belonging to an active aggregation session: either buffer
 * it in the TID's software queue for later aggregation, or (when the BAW
 * is open and the hardware queue shallow) send it immediately as a
 * single-frame A-MPDU.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, struct sk_buff *skb,
			      struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if ((ath_tid_has_buffered(tid) || tid->paused ||
	     !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	     txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
	    txq != sc->tx.uapsdq) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		/* no TX buffer / DMA mapping failed: drop the frame */
		ath_txq_skb_done(sc, txq, skb);
		ieee80211_free_txskb(sc->hw, skb);
		return;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
}

F
Felix Fietkau 已提交
1912
/*
 * Send a single non-aggregated frame: build a one-buffer list, fill the
 * descriptor and hand it to the hardware queue.
 * (The tid parameter is unused here; kept for signature symmetry with
 * ath_tx_send_ampdu.)
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

1932 1933 1934
/*
 * Initialize the per-frame driver info (ath_frame_info) stored in the
 * skb control block: crypto key index/type, frame length and the
 * RTS/CTS rate code (with short preamble applied when the BSS uses it).
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		/* fall back to the station's powersave key for data frames */
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

1975 1976 1977 1978
/*
 * Reduce the TX chainmask for rates/chips that cannot (or should not)
 * transmit on all chains: APM parts on 5 GHz drop from 3 to 2 chains for
 * sub-MCS16 rates, and AR9462 with BT coex active sends CCK on one chain.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *chan = ah->curchan;
	bool apm_5ghz = (ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
			(chan->channelFlags & CHANNEL_5GHZ);

	if (apm_5ghz && chainmask == 0x7 && rate < 0x90)
		return 0x3;

	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

1991 1992 1993 1994
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA.
 *
 * Returns the buffer on success, NULL on failure (no free buffer or DMA
 * mapping error). NOTE(review): the skb itself is NOT freed here on
 * failure — callers are responsible for that (see ath_tx_send_ampdu).
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		/* assign the TID's next sequence number, keeping the
		 * fragment number of fragmented frames intact */
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* all fragments of an MSDU share one sequence number */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2047 2048
/*
 * Common preparation before transmitting a frame: resolve the station,
 * account for the crypto ICV in the frame length, assign a sequence
 * number if mac80211 asked for one, insert header padding for 4-byte
 * alignment, and fill in the per-frame info.  Returns 0 on success or
 * -ENOMEM if there is no headroom for the padding.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	/* The hardware appends the ICV, so include it in the length. */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Non-AP interfaces and non-data frames always clear the
	 * powersave filter. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2097

2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118
/*
 * Main TX entry point.  Prepares the frame, applies flow control for
 * the mapped mac80211 queue, redirects powersave responses to the
 * UAPSD queue, and hands the frame to the aggregation or normal
 * transmit path.
 *
 * Upon failure caller should free skb.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	/* Stop the mac80211 queue once too many frames are pending on
	 * the corresponding hardware queue. */
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	/* Powersave responses are sent from the dedicated UAPSD queue;
	 * swap locks accordingly. */
	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	}

	if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		/* Buffer setup failed; undo the pending-frame accounting
		 * and release the skb (PAPRD skbs are driver-owned). */
		ath_txq_skb_done(sc, txq, skb);
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2212
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2249 2250 2251
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2252

S
Sujith 已提交
2253
/*
 * Hand a completed frame back to mac80211: report ACK status, strip
 * the alignment padding inserted on transmit, and let the powersave
 * state machine go back to sleep if this was the frame it was waiting
 * for.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		sc->sc_ah->caldata->paprd_packet_sent = true;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* If we were waiting on a TX ACK before sleeping, clear the flag
	 * once the queue has drained. */
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}
2297

S
Sujith 已提交
2298
/*
 * Complete a single ath_buf: unmap its DMA buffer, hand the skb back
 * to mac80211 (or finish PAPRD calibration frames), and return the
 * descriptor buffers on bf_q to the free list.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD calibration frame: signal the waiting calibration
		 * code, unless it already gave up waiting (then just free). */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2340 2341
/*
 * Translate hardware TX status into mac80211 rate-control feedback:
 * ACK signal strength, A-MPDU length accounting, and per-rate retry
 * counts.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Rate slots beyond the one actually used were never tried. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2395
/*
 * Reap completed frames from a legacy (non-EDMA) hardware TX queue.
 * Walks the queue's descriptor chain, stopping at the first descriptor
 * the hardware has not finished, and passes each completed transmit
 * unit to ath_tx_process_buffer().
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			/* Only the holding descriptor remains: nothing new
			 * has completed yet. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware has not finished this unit yet; stop reaping. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be recycled. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2471
void ath_tx_tasklet(struct ath_softc *sc)
2472
{
2473 2474
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2475
	int i;
2476

S
Sujith 已提交
2477 2478 2479
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2480 2481 2482
	}
}

2483 2484
/*
 * TX completion tasklet for EDMA (AR93xx+) chips, where TX status is
 * delivered through a dedicated status ring rather than in the frame
 * descriptors themselves.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		/* Drop a stale holding descriptor left at the FIFO head. */
		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* FIFO slot fully completed: advance the tail index
			 * and, if frames are waiting on axq_q, push them to
			 * the hardware. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Keep the last buffer as the holding descriptor. */
			lastbf->bf_stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2559 2560 2561
/*****************/
/* Init, Cleanup */
/*****************/
2562

2563 2564 2565 2566 2567 2568
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2569 2570
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate the TX status ring and point the hardware at it.
 * Returns 0 on success or a negative errno from the allocation.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2590
int ath_tx_init(struct ath_softc *sc, int nbufs)
2591
{
2592
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2593
	int error = 0;
2594

2595
	spin_lock_init(&sc->tx.txbuflock);
2596

2597
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2598
				  "tx", nbufs, 1, 1);
2599
	if (error != 0) {
2600 2601
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2602
		return error;
2603
	}
2604

2605
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2606
				  "beacon", ATH_BCBUF, 1, 1);
2607
	if (error != 0) {
2608 2609
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2610
		return error;
2611
	}
2612

2613 2614
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2615
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2616
		error = ath_tx_edma_init(sc);
2617

S
Sujith 已提交
2618
	return error;
2619 2620 2621 2622
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2623 2624 2625
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2626

2627
	for (tidno = 0, tid = &an->tid[tidno];
2628
	     tidno < IEEE80211_NUM_TIDS;
2629 2630 2631 2632 2633 2634 2635
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2636
		tid->paused    = false;
2637
		tid->active	   = false;
2638
		__skb_queue_head_init(&tid->buf_q);
2639
		acno = TID_TO_WME_AC(tidno);
2640
		tid->ac = &an->ac[acno];
2641
	}
2642

2643
	for (acno = 0, ac = &an->ac[acno];
2644
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2645
		ac->sched    = false;
2646
		ac->txq = sc->tx.txq_map[acno];
2647
		INIT_LIST_HEAD(&ac->tid_q);
2648 2649 2650
	}
}

S
Sujith 已提交
2651
/*
 * Tear down per-station TX state when a station goes away: unlink each
 * TID (and its access category) from the scheduler lists and drop any
 * frames still queued in software.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Unlink the TID from its AC's schedule list. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Unlink the AC from the queue's schedule list. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		/* Drop any frames still pending in the software queue. */
		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}