xmit.c 66.5 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
32 33
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
34 35 36 37
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
static u16 bits_per_symbol[][2] = {
39 40 41 42 43 44 45 46 47 48 49 50 51
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
52
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
53 54 55
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
56
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
57
				struct ath_txq *txq, struct list_head *bf_q,
58
				struct ath_tx_status *ts, int txok);
59
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
60
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
61 62
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
63
			     int txok);
64 65
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
66 67 68
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
69
					   struct sk_buff *skb);
70

71
enum {
72 73
	MCS_HT20,
	MCS_HT20_SGI,
74 75 76 77
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
78 79 80
/*********************/
/* Aggregation logic */
/*********************/
81

82
/* Acquire the per-queue TX lock (bottom-half safe). */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

88
/* Release the per-queue TX lock without processing the completion queue. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

94
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
95
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
96 97 98 99 100 101 102 103 104 105 106 107
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
108
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
109
{
S
Sujith 已提交
110
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
111

S
Sujith 已提交
112 113
	if (tid->paused)
		return;
S
Sujith 已提交
114

S
Sujith 已提交
115 116
	if (tid->sched)
		return;
S
Sujith 已提交
117

S
Sujith 已提交
118 119
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
120

S
Sujith 已提交
121 122
	if (ac->sched)
		return;
123

S
Sujith 已提交
124 125 126
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
127

128
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
129 130
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
131 132 133
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
134 135
}

136 137 138 139 140 141
/*
 * Ask mac80211 to transmit a BlockAckReq for this TID, moving the
 * receiver's reorder window forward to @seqno.
 */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

142 143 144 145 146 147 148
/* Fill the buffer's rate series from mac80211's rate control tables. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

149
/*
 * Flush every frame still buffered on a TID.  Previously-retried
 * frames are completed with failure status (and their BAW slots
 * released); fresh frames are sent out as normal, non-aggregated
 * transmissions.  If anything was dropped, a BAR is sent to move the
 * receiver's window forward.  Called with the TX queue lock held.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* Frame may not have a descriptor assigned yet. */
		if (!bf) {
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
			if (!bf) {
				/* No buffer available - drop the frame. */
				ieee80211_free_txskb(sc->hw, skb);
				continue;
			}
		}

		if (fi->retries) {
			/* Retried frame: give up on it and free its BAW slot. */
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			/* Never-sent frame: transmit it as a normal frame. */
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (sendbar) {
		/* ath_send_bar() may sleep/re-enter; drop the lock around it. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
192

S
Sujith 已提交
193 194
/*
 * Mark @seqno as completed in the software block-ack window and slide
 * the window start forward past any leading run of completed slots,
 * keeping the pending-BAR index in sync.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	/* Position of seqno relative to the window start, as a bitmap slot. */
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* Advance the window over every completed slot at its head. */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
210

S
Sujith 已提交
211
/*
 * Record @seqno as in-flight in the software block-ack window,
 * extending the window tail if the new slot lies beyond it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	/* Grow the window tail when the slot falls outside the current span. */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
/*
 * Drop every frame still buffered on a TID, completing each with
 * error status, then reset the TID's block-ack window bookkeeping.
 * Called with the TX queue lock held.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* Frames without a descriptor are completed directly. */
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		/* Release the frame's BAW slot before failing it. */
		ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	/* Reset window state: nothing is pending any more. */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

S
Sujith 已提交
266
/*
 * Account @count software retries for a frame.  On the first retry
 * the 802.11 retry flag is set in the header and the change is
 * synced back to the DMA-mapped buffer so the hardware transmits the
 * updated header.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* Header was already patched on an earlier retry. */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	/* Push only the modified header back to the device. */
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

286
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
287
{
288
	struct ath_buf *bf = NULL;
S
Sujith 已提交
289 290

	spin_lock_bh(&sc->tx.txbuflock);
291 292

	if (unlikely(list_empty(&sc->tx.txbuf))) {
293 294 295
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
296 297 298 299

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
300 301
	spin_unlock_bh(&sc->tx.txbuflock);

302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319
	return bf;
}

/* Return a TX buffer to the driver's global free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate a TX buffer (used to re-queue the stale holding
 * descriptor of an aggregate for retransmission).  The clone shares
 * the original's skb and DMA mapping; only the descriptor contents
 * and software state are copied.  Returns NULL (with a WARN) when no
 * free buffer is available.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

330 331 332 333
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
334
	struct ath_frame_info *fi;
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
350
		fi = get_frame_info(bf->bf_mpdu);
351
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
352 353 354 355 356 357 358 359 360 361

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
362 363
/*
 * Process the TX status of an A-MPDU.  Each subframe is either
 * completed (acked by the block-ack bitmap, or plain-acked for a
 * single frame), re-queued for software retry, or failed outright
 * once it exceeds the retry limit or falls outside the BAW.
 * Un-acked subframes are spliced back to the head of the TID queue
 * to preserve ordering, and a BAR is sent when frames were dropped.
 * Called with the TX queue lock held.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* Total hardware tries: long retries plus all exhausted rate slots. */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		/* Station is gone: fail every subframe of the aggregate. */
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* Retry budget exhausted: fail and schedule a BAR. */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			/* Feed rate control once per aggregate status. */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* BAR transmission must happen without the queue lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
588

589 590 591 592 593 594 595 596 597 598
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Dispatch completion handling for one descriptor chain: update the
 * queue depth counters, then complete the frame either as a single
 * MPDU (feeding rate control unless this is a flush) or as an
 * aggregate via ath_tx_complete_aggr().  Finally kick the scheduler
 * on HT hardware.  Called with the TX queue lock held.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			/* Report the rates actually used to rate control. */
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
		ath_txq_schedule(sc, txq);
}

625 626 627 628 629 630 631 632 633 634 635
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

636 637 638 639
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

640 641 642 643 644 645 646
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
647 648
/*
 * Compute the maximum aggregate length (in bytes) for a frame, based
 * on the slowest rate in its rate series, the per-queue TXOP-derived
 * frame length limits, BT coexistence constraints and the peer's
 * advertised A-MPDU maximum.  Returns 0 when the frame must not be
 * aggregated (probe frame or legacy rate in the series).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* Map bandwidth + guard interval to the framelen table row. */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
718

S
Sujith 已提交
719
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	/* Density is expressed in us; convert to OFDM symbols. */
	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Pad the subframe with delimiters up to the minimum length. */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

793 794 795
/*
 * Fetch the next transmittable frame from a TID's buffer queue.
 * Frames that cannot get a descriptor are dropped; frames already
 * covered by a pending BAR are completed as failed.  Returns NULL
 * when the queue is empty or the next frame would step outside the
 * block-ack window.  The returned frame stays on tid->buf_q; the
 * caller unlinks it.  Called with the TX queue lock held.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid)
{
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		skb = skb_peek(&tid->buf_q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf) {
			/* No descriptor available - drop the frame. */
			__skb_unlink(skb, &tid->buf_q);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			/* Frame precedes a pending BAR: fail it immediately. */
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;
		return bf;
	}

	return NULL;
}

/*
 * Build one A-MPDU from a TID's buffer queue: pull frames while they
 * fit the rate-derived length limit, the half-BAW subframe limit and
 * the block-ack window, linking them via bf_next onto @bf_q and
 * registering each new (non-retried) frame in the BAW.  The total
 * aggregate length (including delimiter padding) is returned through
 * @aggr_len.  Called with the TX queue lock held.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;

	do {
		bf = ath_tx_get_tid_subframe(sc, txq, tid);
		if (!bf) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		if (!bf_first)
			bf_first = bf;

		/* Look up the rate-based length limit once, for the first frame. */
		if (!rl) {
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* Probe frames terminate the aggregate but are not rejected. */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
933

934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962
/*
 * Compute the airtime, in microseconds, of an HT frame.
 *
 * rix    - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams = HT_RC_2_STREAMS(rix);

	/* number of symbols needed to carry PLCP + payload bits */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	duration = half_gi ? SYMBOL_TIME_HALFGI(nsymbols)
			   : SYMBOL_TIME(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000
/*
 * Largest frame (in bytes) whose transmission fits within the given
 * number of microseconds at the given MCS / bandwidth / guard
 * interval, clamped to what the hardware length field can express.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int streams = HT_RC_2_STREAMS(mcs);
	int symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	int bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
	int bytes;

	/* Remove PLCP overhead before converting to bytes. */
	bits -= OFDM_PLCP_BITS;
	bytes = bits / 8;
	bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
	/* Clamp to the hardware's 16-bit length limit. */
	if (bytes > 65532)
		bytes = 65532;

	return bytes;
}

void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
	for (mcs = 0; mcs < 32; mcs++) {
		cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1001 1002
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
1003 1004 1005 1006 1007 1008 1009
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
1010
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
1011 1012
	int i;
	u8 rix = 0;
1013 1014 1015

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
1016
	rates = bf->rates;
1017
	hdr = (struct ieee80211_hdr *)skb->data;
1018 1019 1020

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
1021
	info->rtscts_rate = fi->rtscts_rate;
1022

1023
	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
1024 1025 1026 1027 1028 1029 1030
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
1031
		info->rates[i].Tries = rates[i].count;
1032 1033

		    if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1034 1035
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
1036
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1037 1038
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
1039 1040 1041
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1042
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
1043
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1044
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
1045 1046 1047 1048 1049 1050 1051

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
1052 1053 1054 1055
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
1056 1057
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1058
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
1059 1060 1061 1062
			continue;
		}

		/* legacy rates */
1063
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1064 1065 1066 1067 1068 1069
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

1070
		info->rates[i].Rate = rate->hw_value;
1071 1072
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1073
				info->rates[i].Rate |= rate->hw_value_short;
1074 1075 1076 1077 1078
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
1079
			info->rates[i].ChSel = ah->txchainmask;
1080
		else
1081 1082
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
1083

1084
		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1085 1086 1087 1088 1089
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1090
		info->flags &= ~ATH9K_TXDESC_RTSENA;
1091 1092

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1093 1094 1095
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1096

1097 1098 1099 1100 1101 1102 1103 1104
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1105

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1118 1119
}

1120 1121
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
1122 1123
{
	struct ath_hw *ah = sc->sc_ah;
1124
	struct ath_buf *bf_first = NULL;
1125
	struct ath_tx_info info;
1126

1127 1128 1129 1130 1131 1132
	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

1133
	while (bf) {
1134
		struct sk_buff *skb = bf->bf_mpdu;
1135
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1136
		struct ath_frame_info *fi = get_frame_info(skb);
1137
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
1138 1139

		info.type = get_hw_packet_type(skb);
1140
		if (bf->bf_next)
1141
			info.link = bf->bf_next->bf_daddr;
1142
		else
1143 1144
			info.link = 0;

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164
		if (!bf_first) {
			bf_first = bf;

			info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			ath_buf_set_rate(sc, bf, &info, len);
		}

1165 1166
		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
1167 1168 1169 1170 1171
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
1172
			if (bf == bf_first)
1173
				info.aggr = AGGR_BUF_FIRST;
1174
			else if (bf == bf_first->bf_lastbf)
1175 1176 1177
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;
1178

1179 1180
			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
1181 1182
		}

1183 1184 1185
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

1186
		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
1187 1188 1189 1190
		bf = bf->bf_next;
	}
}

S
Sujith 已提交
1191 1192 1193
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
S
Sujith 已提交
1194
	struct ath_buf *bf;
S
Sujith 已提交
1195
	enum ATH_AGGR_STATUS status;
1196
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
1197
	struct list_head bf_q;
1198
	int aggr_len;
1199

S
Sujith 已提交
1200
	do {
1201
		if (skb_queue_empty(&tid->buf_q))
S
Sujith 已提交
1202
			return;
1203

S
Sujith 已提交
1204 1205
		INIT_LIST_HEAD(&bf_q);

1206
		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
1207 1208

		/*
S
Sujith 已提交
1209 1210
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
1211
		 */
S
Sujith 已提交
1212 1213
		if (list_empty(&bf_q))
			break;
1214

S
Sujith 已提交
1215
		bf = list_first_entry(&bf_q, struct ath_buf, list);
S
Sujith 已提交
1216
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
1217
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
1218

1219 1220
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
1221 1222 1223
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
1224 1225
		}

S
Sujith 已提交
1226
		/* if only one frame, send as non-aggregate */
1227
		if (bf == bf->bf_lastbf) {
1228 1229 1230 1231
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
S
Sujith 已提交
1232
		}
1233

1234
		ath_tx_fill_desc(sc, bf, txq, aggr_len);
1235
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1236
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
S
Sujith 已提交
1237 1238 1239
		 status != ATH_AGGR_BAW_CLOSED);
}

1240 1241
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
S
Sujith 已提交
1242 1243 1244
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;
1245
	u8 density;
S
Sujith 已提交
1246 1247

	an = (struct ath_node *)sta->drv_priv;
S
Sujith 已提交
1248
	txtid = ATH_AN_2_TID(an, tid);
1249

1250 1251 1252 1253
	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
1254
	if (sta->ht_cap.ht_supported) {
1255 1256 1257 1258 1259 1260
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

1261
	txtid->active = true;
1262
	txtid->paused = true;
1263
	*ssn = txtid->seq_start = txtid->seq_next;
1264
	txtid->bar_index = -1;
1265

1266 1267 1268
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

1269
	return 0;
S
Sujith 已提交
1270
}
1271

1272
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
S
Sujith 已提交
1273 1274 1275
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
1276
	struct ath_txq *txq = txtid->ac->txq;
1277

F
Felix Fietkau 已提交
1278
	ath_txq_lock(sc, txq);
1279
	txtid->active = false;
1280
	txtid->paused = true;
1281
	ath_tx_flush_tid(sc, txtid);
F
Felix Fietkau 已提交
1282
	ath_txq_unlock_complete(sc, txq);
S
Sujith 已提交
1283
}
1284

1285 1286
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
1287 1288 1289 1290
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
1291
	bool buffered;
1292 1293 1294
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1295
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1296 1297 1298 1299 1300 1301 1302

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1303
		ath_txq_lock(sc, txq);
1304

1305
		buffered = !skb_queue_empty(&tid->buf_q);
1306 1307 1308 1309 1310 1311 1312 1313 1314

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

F
Felix Fietkau 已提交
1315
		ath_txq_unlock(sc, txq);
1316

1317 1318
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
1319 1320 1321 1322 1323 1324 1325 1326 1327 1328
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1329
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1330 1331 1332 1333

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1334
		ath_txq_lock(sc, txq);
1335 1336
		ac->clear_ps_filter = true;

1337
		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
1338 1339 1340 1341
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

F
Felix Fietkau 已提交
1342
		ath_txq_unlock_complete(sc, txq);
1343 1344 1345
	}
}

1346 1347
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
S
Sujith 已提交
1348
{
1349
	struct ath_atx_tid *tid;
S
Sujith 已提交
1350
	struct ath_node *an;
1351
	struct ath_txq *txq;
S
Sujith 已提交
1352 1353

	an = (struct ath_node *)sta->drv_priv;
1354 1355
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;
S
Sujith 已提交
1356

1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367
	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	tid->paused = false;

	if (!skb_queue_empty(&tid->buf_q)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
1368 1369
}

1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
/*
 * mac80211 callback: release up to @nframes U-APSD buffered frames for
 * @sta on the TIDs set in the @tids bitmask.  Released frames are chained
 * together and pushed out through the dedicated uapsd queue; the last one
 * is tagged with IEEE80211_TX_STATUS_EOSP so mac80211 can end the service
 * period when its TX status arrives.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	/* walk the TID bitmask, lowest TID first */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);
		if (tid->paused)
			continue;

		/* frames are pulled from the TID's own txq under its lock */
		ath_txq_lock(sc, tid->ac->txq);
		while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, &tid->buf_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			/* track the frame in the block-ack window */
			ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
			/* sent individually, not as part of an aggregate */
			bf->bf_state.bf_type &= ~BUF_AGGR;
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (skb_queue_empty(&tid->buf_q))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* mark end-of-service-period on the last released frame */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1434 1435 1436
/********************/
/* Queue Management */
/********************/
1437

S
Sujith 已提交
1438
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1439
{
1440
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1441
	struct ath9k_tx_queue_info qi;
1442
	static const int subtype_txq_to_hwq[] = {
1443 1444 1445 1446
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
1447
	};
1448
	int axq_qnum, i;
1449

S
Sujith 已提交
1450
	memset(&qi, 0, sizeof(qi));
1451
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
S
Sujith 已提交
1452 1453 1454 1455
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;
1456 1457

	/*
S
Sujith 已提交
1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
1471
	 */
1472
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1473
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
1474 1475 1476 1477 1478 1479 1480
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
1481 1482
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
1483
		/*
S
Sujith 已提交
1484 1485
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
1486
		 */
S
Sujith 已提交
1487
		return NULL;
1488
	}
1489 1490
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];
1491

1492 1493
		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
S
Sujith 已提交
1494
		txq->axq_link = NULL;
F
Felix Fietkau 已提交
1495
		__skb_queue_head_init(&txq->complete_q);
S
Sujith 已提交
1496 1497 1498 1499
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
1500
		txq->axq_ampdu_depth = 0;
1501
		txq->axq_tx_inprogress = false;
1502
		sc->tx.txqsetup |= 1<<axq_qnum;
1503 1504 1505 1506

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
S
Sujith 已提交
1507
	}
1508
	return &sc->tx.txq[axq_qnum];
1509 1510
}

S
Sujith 已提交
1511 1512 1513
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
1514
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1515 1516 1517
	int error = 0;
	struct ath9k_tx_queue_info qi;

1518
	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
S
Sujith 已提交
1519 1520 1521 1522 1523 1524 1525 1526 1527

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1528 1529
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
S
Sujith 已提交
1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
1541
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
S
Sujith 已提交
1542
	int qnum = sc->beacon.cabq->axq_qnum;
1543

S
Sujith 已提交
1544
	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1545
	/*
S
Sujith 已提交
1546
	 * Ensure the readytime % is within the bounds.
1547
	 */
S
Sujith 已提交
1548 1549 1550 1551
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1552

1553
	qi.tqi_readyTime = (cur_conf->beacon_interval *
S
Sujith 已提交
1554
			    sc->config.cabqReadytime) / 100;
S
Sujith 已提交
1555 1556 1557
	ath_txq_update(sc, qnum, &qi);

	return 0;
1558 1559
}

1560
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1561
			       struct list_head *list)
1562
{
S
Sujith 已提交
1563 1564
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
1565 1566 1567
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
1568
	ts.ts_status = ATH9K_TX_FLUSH;
S
Sujith 已提交
1569
	INIT_LIST_HEAD(&bf_head);
1570

1571 1572
	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);
1573

1574 1575
		if (bf->bf_stale) {
			list_del(&bf->list);
1576

1577 1578
			ath_tx_return_buffer(sc, bf);
			continue;
S
Sujith 已提交
1579
		}
1580

S
Sujith 已提交
1581
		lastbf = bf->bf_lastbf;
1582
		list_cut_position(&bf_head, list, &lastbf->list);
1583
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
1584
	}
1585
}
1586

1587 1588 1589 1590 1591 1592
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
1593
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
1594
{
F
Felix Fietkau 已提交
1595 1596
	ath_txq_lock(sc, txq);

1597
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1598
		int idx = txq->txq_tailidx;
1599

1600
		while (!list_empty(&txq->txq_fifo[idx])) {
1601
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
1602 1603

			INCR(idx, ATH_TXFIFO_DEPTH);
1604
		}
1605
		txq->txq_tailidx = idx;
1606
	}
1607

1608 1609
	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
1610
	ath_drain_txq_list(sc, txq, &txq->axq_q);
1611

F
Felix Fietkau 已提交
1612
	ath_txq_unlock_complete(sc, txq);
1613 1614
}

1615
bool ath_drain_all_txq(struct ath_softc *sc)
1616
{
1617
	struct ath_hw *ah = sc->sc_ah;
1618
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
1619
	struct ath_txq *txq;
1620 1621
	int i;
	u32 npend = 0;
S
Sujith 已提交
1622

S
Sujith Manoharan 已提交
1623
	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1624
		return true;
S
Sujith 已提交
1625

1626
	ath9k_hw_abort_tx_dma(ah);
S
Sujith 已提交
1627

1628
	/* Check if any queue remains active */
S
Sujith 已提交
1629
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1630 1631 1632
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

1633 1634
		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
S
Sujith 已提交
1635 1636
	}

1637
	if (npend)
1638
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
S
Sujith 已提交
1639 1640

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
1651
		ath_draintxq(sc, txq);
S
Sujith 已提交
1652
	}
1653 1654

	return !npend;
S
Sujith 已提交
1655
}
1656

S
Sujith 已提交
1657
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1658
{
S
Sujith 已提交
1659 1660
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1661
}
1662

1663 1664 1665
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
S
Sujith 已提交
1666 1667
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
1668 1669
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
1670

1671 1672
	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
	    list_empty(&txq->axq_acq) ||
1673
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
S
Sujith 已提交
1674
		return;
1675

1676 1677
	rcu_read_lock();

S
Sujith 已提交
1678
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1679
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1680

1681 1682 1683 1684
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;
1685

1686 1687 1688 1689 1690
		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;
1691

1692 1693
			if (tid->paused)
				continue;
1694

1695
			ath_tx_sched_aggr(sc, txq, tid);
1696

1697 1698 1699 1700
			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
1701
			if (!skb_queue_empty(&tid->buf_q))
1702
				ath_tx_queue_tid(txq, tid);
1703

1704 1705 1706 1707
			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}
1708

1709 1710 1711
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
1712
		}
1713 1714 1715

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1716
			break;
S
Sujith 已提交
1717
	}
1718 1719

	rcu_read_unlock();
S
Sujith 已提交
1720
}
1721

S
Sujith 已提交
1722 1723 1724 1725
/***********/
/* TX, DMA */
/***********/

1726
/*
S
Sujith 已提交
1727 1728
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
1729
 */
S
Sujith 已提交
1730
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1731
			     struct list_head *head, bool internal)
1732
{
1733
	struct ath_hw *ah = sc->sc_ah;
1734
	struct ath_common *common = ath9k_hw_common(ah);
1735 1736 1737
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;
1738

S
Sujith 已提交
1739 1740 1741 1742
	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */
1743

S
Sujith 已提交
1744 1745
	if (list_empty(head))
		return;
1746

1747
	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
S
Sujith 已提交
1748
	bf = list_first_entry(head, struct ath_buf, list);
1749
	bf_last = list_entry(head->prev, struct ath_buf, list);
1750

1751 1752
	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);
1753

1754 1755
	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
1756
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1757
		puttxbuf = true;
S
Sujith 已提交
1758
	} else {
1759 1760
		list_splice_tail_init(head, &txq->axq_q);

1761 1762
		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
1763
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
J
Joe Perches 已提交
1764 1765
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
1766 1767 1768 1769 1770 1771 1772 1773 1774
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1775
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
1776 1777 1778 1779
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
F
Felix Fietkau 已提交
1780
		TX_STAT_INC(txq->axq_qnum, txstart);
1781
		ath9k_hw_txstart(ah, txq->axq_qnum);
S
Sujith 已提交
1782
	}
1783 1784

	if (!internal) {
1785 1786 1787 1788 1789 1790 1791
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf = bf->bf_lastbf->bf_next;
		}
1792
	}
S
Sujith 已提交
1793
}
1794

1795 1796 1797
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, struct sk_buff *skb,
			      struct ath_tx_control *txctl)
1798
{
1799
	struct ath_frame_info *fi = get_frame_info(skb);
F
Felix Fietkau 已提交
1800
	struct list_head bf_head;
1801
	struct ath_buf *bf;
1802

S
Sujith 已提交
1803 1804 1805 1806 1807 1808 1809
	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
1810 1811 1812 1813
	if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
	     !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	     txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
	    txq != sc->tx.uapsdq) {
1814
		/*
S
Sujith 已提交
1815 1816
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
1817
		 */
1818
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
1819
		__skb_queue_tail(&tid->buf_q, skb);
1820
		if (!txctl->an || !txctl->an->sleeping)
1821
			ath_tx_queue_tid(txq, tid);
S
Sujith 已提交
1822 1823 1824
		return;
	}

1825
	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
F
Felix Fietkau 已提交
1826 1827
	if (!bf) {
		ieee80211_free_txskb(sc->hw, skb);
1828
		return;
F
Felix Fietkau 已提交
1829
	}
1830

1831
	ath_set_rates(tid->an->vif, tid->an->sta, bf);
1832
	bf->bf_state.bf_type = BUF_AMPDU;
F
Felix Fietkau 已提交
1833 1834 1835
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

S
Sujith 已提交
1836
	/* Add sub-frame to BAW */
1837
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
S
Sujith 已提交
1838 1839

	/* Queue to h/w without aggregation */
1840
	TX_STAT_INC(txq->axq_qnum, a_queued_hw);
S
Sujith 已提交
1841
	bf->bf_lastbf = bf;
1842 1843
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
S
Sujith 已提交
1844 1845
}

F
Felix Fietkau 已提交
1846
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1847
			       struct ath_atx_tid *tid, struct sk_buff *skb)
S
Sujith 已提交
1848
{
1849 1850
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
S
Sujith 已提交
1851 1852
	struct ath_buf *bf;

1853 1854 1855 1856
	bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
1857
	bf->bf_state.bf_type = 0;
S
Sujith 已提交
1858

1859
	bf->bf_next = NULL;
S
Sujith 已提交
1860
	bf->bf_lastbf = bf;
1861
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1862
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
S
Sujith 已提交
1863
	TX_STAT_INC(txq->axq_qnum, queued);
S
Sujith 已提交
1864 1865
}

1866 1867 1868
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
1869
			     int framelen)
S
Sujith 已提交
1870 1871
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1872
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1873
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1874
	const struct ieee80211_rate *rate;
1875
	struct ath_frame_info *fi = get_frame_info(skb);
1876
	struct ath_node *an = NULL;
1877
	enum ath9k_key_type keytype;
1878 1879 1880 1881 1882 1883 1884 1885 1886 1887
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;
S
Sujith 已提交
1888

1889
	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
1890
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
S
Sujith 已提交
1891

1892 1893 1894
	if (sta)
		an = (struct ath_node *) sta->drv_priv;

1895 1896 1897
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
1898 1899
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
1900 1901 1902 1903
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
1904 1905 1906
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
S
Sujith 已提交
1907 1908
}

1909 1910 1911 1912
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
1913

1914 1915 1916
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
1917
		return 0x3;
1918 1919 1920
	else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
		 IS_CCK_RATE(rate))
		return 0x2;
1921 1922 1923 1924
	else
		return chainmask;
}

1925 1926 1927 1928
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
1929
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
F
Felix Fietkau 已提交
1930
					   struct ath_txq *txq,
1931
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
1932
					   struct sk_buff *skb)
1933
{
F
Felix Fietkau 已提交
1934
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1935
	struct ath_frame_info *fi = get_frame_info(skb);
1936
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
F
Felix Fietkau 已提交
1937
	struct ath_buf *bf;
S
Sujith Manoharan 已提交
1938
	int fragno;
1939
	u16 seqno;
F
Felix Fietkau 已提交
1940 1941 1942

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
1943
		ath_dbg(common, XMIT, "TX buffers are full\n");
F
Felix Fietkau 已提交
1944
		return NULL;
F
Felix Fietkau 已提交
1945
	}
1946

S
Sujith 已提交
1947
	ATH_TXBUF_RESET(bf);
1948

1949
	if (tid) {
S
Sujith Manoharan 已提交
1950
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1951 1952
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
S
Sujith Manoharan 已提交
1953 1954 1955 1956 1957 1958 1959

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

1960 1961 1962
		bf->bf_state.seqno = seqno;
	}

1963
	bf->bf_mpdu = skb;
1964

B
Ben Greear 已提交
1965 1966 1967
	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
1968
		bf->bf_mpdu = NULL;
1969
		bf->bf_buf_addr = 0;
1970 1971
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
F
Felix Fietkau 已提交
1972
		ath_tx_return_buffer(sc, bf);
F
Felix Fietkau 已提交
1973
		return NULL;
1974 1975
	}

1976
	fi->bf = bf;
F
Felix Fietkau 已提交
1977 1978 1979 1980

	return bf;
}

1981 1982
/*
 * Common pre-transmit fixups: resolve the ath_node, account for the
 * crypto ICV in the frame length, optionally assign a sequence number,
 * and insert the 4-byte alignment padding after the 802.11 header.
 * Returns 0 on success or -ENOMEM if there is no headroom for padding.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Only AP interfaces transmitting data frames keep the PS filter;
	 * everything else asks hardware to clear it. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2031

2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	/* Back-pressure mac80211 when this mapped queue exceeds its limit. */
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	/* PS responses go out on the dedicated UAPSD queue instead. */
	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	}

	if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181
/*
 * Queue buffered broadcast/multicast frames onto the CAB (content after
 * beacon) queue, limited so their total airtime stays below a fraction
 * of the DTIM interval. Clears the MOREDATA bit on the first frame when
 * no more frames follow, so associated stations may go back to sleep.
 */
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
		ath_buf_set_rate(sc, bf, &info, fi->framelen);
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	/*
	 * frame_control is __le16, so the MOREDATA flag must be tested and
	 * cleared in wire byte order; comparing against the host-order
	 * constant would misbehave on big-endian hosts. The header was
	 * already DMA-mapped, so sync the modified bytes back to the device.
	 */
	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2182 2183 2184
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2185

S
Sujith 已提交
2186
/*
 * Hand a completed frame back to mac80211: strip the driver-inserted
 * header padding, update PS state, and wake the mapped mac80211 queue
 * if it was stopped and has drained below its pending-frame limit.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		sc->sc_ah->caldata->paprd_packet_sent = true;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	/* If we were only awake waiting for this TX ack and the queue has
	 * drained, allow the chip to go back to sleep. */
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);

	/* UAPSD frames are accounted against the queue they were mapped to,
	 * not the dedicated uapsd queue they were sent on. */
	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	if (txq == sc->tx.txq_map[q]) {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped &&
		    txq->pending_frames < sc->tx.txq_max_pending[q]) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = false;
		}
	}
}
2244

S
Sujith 已提交
2245
/*
 * Tear down a completed ath_buf: unmap its DMA buffer, deliver (or free)
 * the skb, and return the descriptor chain in @bf_q to the free list.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD calibration frame: if the waiter has timed out just
		 * drop the skb, otherwise signal the calibration path. */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2287 2288
/*
 * Translate the hardware tx status into mac80211 rate-control status:
 * ack signal, A-MPDU counts, and per-rate try counts.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate all rate slots after the one actually used. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2342
/*
 * Drain completed descriptors from one hardware tx queue and hand each
 * finished transmit unit to ath_tx_process_buffer().
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		/* Abandon processing while a hardware reset is pending. */
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			/* Only the holding descriptor left: nothing to reap. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware has not finished this unit yet; stop here. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			/* The previous holding descriptor is now done with. */
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2418
void ath_tx_tasklet(struct ath_softc *sc)
2419
{
2420 2421
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2422
	int i;
2423

S
Sujith 已提交
2424 2425 2426
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2427 2428 2429
	}
}

2430 2431
/*
 * TX completion tasklet for EDMA (AR93xx+) hardware: pop entries from the
 * global tx status ring and complete the matching FIFO slot on each queue.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		/* Abandon processing while a hardware reset is pending. */
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);
		/* Discard a leftover holding descriptor at the FIFO head. */
		if (bf->bf_stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* Whole FIFO slot completed; advance the tail index
			 * and push any software-queued frames to hardware. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Partial completion: keep the last descriptor of the
			 * unit in the FIFO as the holding descriptor. */
			lastbf->bf_stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2506 2507 2508
/*****************/
/* Init, Cleanup */
/*****************/
2509

2510 2511 2512 2513 2514 2515
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2516 2517
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up EDMA tx status handling: allocate the status ring and point the
 * hardware at it. Returns 0 on success or a negative errno.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret;

	ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2537
int ath_tx_init(struct ath_softc *sc, int nbufs)
2538
{
2539
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2540
	int error = 0;
2541

2542
	spin_lock_init(&sc->tx.txbuflock);
2543

2544
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2545
				  "tx", nbufs, 1, 1);
2546
	if (error != 0) {
2547 2548
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2549
		return error;
2550
	}
2551

2552
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2553
				  "beacon", ATH_BCBUF, 1, 1);
2554
	if (error != 0) {
2555 2556
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2557
		return error;
2558
	}
2559

2560 2561
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2562
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2563
		error = ath_tx_edma_init(sc);
2564

S
Sujith 已提交
2565
	return error;
2566 2567 2568 2569
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2570 2571 2572
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2573

2574
	for (tidno = 0, tid = &an->tid[tidno];
2575
	     tidno < IEEE80211_NUM_TIDS;
2576 2577 2578 2579 2580 2581 2582
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2583
		tid->paused    = false;
2584
		tid->active	   = false;
2585
		__skb_queue_head_init(&tid->buf_q);
2586
		acno = TID_TO_WME_AC(tidno);
2587
		tid->ac = &an->ac[acno];
2588
	}
2589

2590
	for (acno = 0, ac = &an->ac[acno];
2591
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2592
		ac->sched    = false;
2593
		ac->txq = sc->tx.txq_map[acno];
2594
		INIT_LIST_HEAD(&ac->tid_q);
2595 2596 2597
	}
}

S
Sujith 已提交
2598
/*
 * Tear down per-station TX state: unschedule every TID and its access
 * category, and drop any frames still pending in the TID queues.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Remove the TID from the queue's scheduling list. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Remove the whole access category from scheduling too. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}