xmit.c 71.4 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

/* PHY timing constants (802.11n mixed-mode preamble, in microseconds). */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */

/* Inverse of the above: microseconds -> number of OFDM symbols. */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)


38
/*
 * Data bits carried per OFDM symbol for MCS 0-7 (single stream),
 * indexed by [mcs % 8][0 = 20MHz, 1 = 40MHz].  Multiply by the
 * stream count (HT_RC_2_STREAMS) for multi-stream rates.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55
				struct ath_txq *txq, struct list_head *bf_q,
56
				struct ath_tx_status *ts, int txok);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
67
					   struct sk_buff *skb);
68

69
enum {
70 71
	MCS_HT20,
	MCS_HT20_SGI,
72 73 74 75
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
81
	__acquires(&txq->axq_lock)
F
Felix Fietkau 已提交
82 83 84 85
{
	spin_lock_bh(&txq->axq_lock);
}

86
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
87
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
88 89 90 91
{
	spin_unlock_bh(&txq->axq_lock);
}

92
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
93
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
94 95 96 97 98 99 100 101 102 103 104 105
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

106 107
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_atx_tid *tid)
S
Sujith 已提交
108
{
S
Sujith 已提交
109
	struct ath_atx_ac *ac = tid->ac;
110 111 112 113 114 115
	struct list_head *list;
	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
	struct ath_chanctx *ctx = avp->chanctx;

	if (!ctx)
		return;
S
Sujith 已提交
116

S
Sujith 已提交
117 118
	if (tid->sched)
		return;
S
Sujith 已提交
119

S
Sujith 已提交
120 121
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
122

S
Sujith 已提交
123 124
	if (ac->sched)
		return;
125

S
Sujith 已提交
126
	ac->sched = true;
127 128 129

	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
	list_add_tail(&ac->list, list);
S
Sujith 已提交
130
}
131

132
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
133 134
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
135 136 137
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
138 139
}

140 141
/* Send a BlockAckReq for this TID starting at @seqno (station TIDs only). */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	if (!tid->an->sta)
		return;

	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

149 150 151 152 153 154 155
/* Fill bf->rates with the current mac80211 rate selection for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

156 157 158
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
159
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
160 161 162
	struct ath_frame_info *fi = get_frame_info(skb);
	int hw_queue;
	int q = fi->txq;
163

164
	if (q < 0)
165 166
		return;

167
	txq = sc->tx.txq_map[q];
168 169 170
	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

171
	hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
172 173
	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
174
		ieee80211_wake_queue(sc->hw, hw_queue);
175 176 177 178
		txq->stopped = false;
	}
}

179 180 181
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
182
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
183 184 185
	return ATH_AN_2_TID(an, tidno);
}

186 187
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
188
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
189 190 191 192
}

/*
 * Dequeue the next frame for this TID, draining the retry queue before
 * the normal buffer queue to preserve retransmission ordering.
 */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *skb;

	skb = __skb_dequeue(&tid->retry_q);
	if (!skb)
		skb = __skb_dequeue(&tid->buf_q);

	return skb;
}

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 *
 * Frames for which no ath_buf can be set up are dropped and reported
 * back to mac80211 as freed.
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}
}

237
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
S
Sujith 已提交
238
{
239
	struct ath_txq *txq = tid->ac->txq;
240
	struct sk_buff *skb;
S
Sujith 已提交
241 242
	struct ath_buf *bf;
	struct list_head bf_head;
243
	struct ath_tx_status ts;
244
	struct ath_frame_info *fi;
245
	bool sendbar = false;
246

247
	INIT_LIST_HEAD(&bf_head);
248

249
	memset(&ts, 0, sizeof(ts));
250

251
	while ((skb = __skb_dequeue(&tid->retry_q))) {
252 253
		fi = get_frame_info(skb);
		bf = fi->bf;
F
Felix Fietkau 已提交
254
		if (!bf) {
255 256 257
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
F
Felix Fietkau 已提交
258 259
		}

260
		if (fi->baw_tracked) {
261
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
262
			sendbar = true;
263
		}
264 265 266

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
267
	}
268

269
	if (sendbar) {
F
Felix Fietkau 已提交
270
		ath_txq_unlock(sc, txq);
271
		ath_send_bar(tid, tid->seq_start);
F
Felix Fietkau 已提交
272 273
		ath_txq_lock(sc, txq);
	}
S
Sujith 已提交
274
}
275

S
Sujith 已提交
276 277
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
278
{
S
Sujith 已提交
279
	int index, cindex;
280

S
Sujith 已提交
281 282
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
283

284
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
285

286
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
287 288
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
289 290
		if (tid->bar_index >= 0)
			tid->bar_index--;
S
Sujith 已提交
291
	}
S
Sujith 已提交
292
}
293

S
Sujith 已提交
294
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
295
			     struct ath_buf *bf)
S
Sujith 已提交
296
{
297 298
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
S
Sujith 已提交
299
	int index, cindex;
S
Sujith 已提交
300

301
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
302
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
303
	__set_bit(cindex, tid->tx_buf);
304
	fi->baw_tracked = 1;
305

S
Sujith 已提交
306 307 308 309
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
310 311 312
	}
}

S
Sujith 已提交
313 314
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
315 316

{
317
	struct sk_buff *skb;
S
Sujith 已提交
318 319
	struct ath_buf *bf;
	struct list_head bf_head;
320
	struct ath_tx_status ts;
321
	struct ath_frame_info *fi;
322 323

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
324
	INIT_LIST_HEAD(&bf_head);
325

326
	while ((skb = ath_tid_dequeue(tid))) {
327 328
		fi = get_frame_info(skb);
		bf = fi->bf;
329

330 331 332 333 334
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

335
		list_add_tail(&bf->list, &bf_head);
336
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
337
	}
338 339
}

S
Sujith 已提交
340
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
341
			     struct sk_buff *skb, int count)
342
{
343
	struct ath_frame_info *fi = get_frame_info(skb);
344
	struct ath_buf *bf = fi->bf;
S
Sujith 已提交
345
	struct ieee80211_hdr *hdr;
346
	int prev = fi->retries;
347

S
Sujith 已提交
348
	TX_STAT_INC(txq->axq_qnum, a_retries);
349 350 351
	fi->retries += count;

	if (prev > 0)
352
		return;
353

S
Sujith 已提交
354 355
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
356 357
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
358 359
}

360
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
361
{
362
	struct ath_buf *bf = NULL;
S
Sujith 已提交
363 364

	spin_lock_bh(&sc->tx.txbuflock);
365 366

	if (unlikely(list_empty(&sc->tx.txbuf))) {
367 368 369
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
370 371 372 373

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
374 375
	spin_unlock_bh(&sc->tx.txbuflock);

376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
	return bf;
}

/* Return an ath_buf to the global tx buffer pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

S
Sujith 已提交
394 395 396 397
	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
398
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
S
Sujith 已提交
399
	tbf->bf_state = bf->bf_state;
400
	tbf->bf_state.stale = false;
S
Sujith 已提交
401 402 403 404

	return tbf;
}

405 406 407 408
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
409
	struct ath_frame_info *fi;
410 411 412 413 414 415 416 417 418 419 420 421 422 423 424
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
425
		fi = get_frame_info(bf->bf_mpdu);
426
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
427 428 429 430 431 432 433 434 435 436

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
437 438
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
439
				 struct ath_tx_status *ts, int txok)
440
{
S
Sujith 已提交
441 442
	struct ath_node *an = NULL;
	struct sk_buff *skb;
443
	struct ieee80211_sta *sta;
F
Felix Fietkau 已提交
444
	struct ieee80211_hw *hw = sc->hw;
445
	struct ieee80211_hdr *hdr;
446
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
447
	struct ath_atx_tid *tid = NULL;
S
Sujith 已提交
448
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
449 450
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
451
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
452
	u32 ba[WME_BA_BMP_SIZE >> 5];
453
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
454
	bool rc_update = true, isba;
455
	struct ieee80211_tx_rate rates[4];
456
	struct ath_frame_info *fi;
457
	int nframes;
458
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
459
	int i, retries;
460
	int bar_index = -1;
461

S
Sujith 已提交
462
	skb = bf->bf_mpdu;
463 464
	hdr = (struct ieee80211_hdr *)skb->data;

465 466
	tx_info = IEEE80211_SKB_CB(skb);

467
	memcpy(rates, bf->rates, sizeof(rates));
468

469 470 471 472
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

473
	rcu_read_lock();
474

475
	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
476 477
	if (!sta) {
		rcu_read_unlock();
478

479 480 481 482
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

483
			if (!bf->bf_state.stale || bf_next != NULL)
484 485
				list_move_tail(&bf->list, &bf_head);

486
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
487 488 489

			bf = bf_next;
		}
490
		return;
491 492
	}

493
	an = (struct ath_node *)sta->drv_priv;
494
	tid = ath_get_skb_tid(sc, an, skb);
495
	seq_first = tid->seq_start;
496
	isba = ts->ts_flags & ATH9K_TX_BA;
497

498 499 500 501
	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
502 503 504
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
505
	 */
506
	if (isba && tid->tidno != ts->tid)
507 508
		txok = false;

S
Sujith 已提交
509
	isaggr = bf_isaggr(bf);
S
Sujith 已提交
510
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
511

S
Sujith 已提交
512
	if (isaggr && txok) {
513 514 515
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
S
Sujith 已提交
516
		} else {
S
Sujith 已提交
517 518 519 520 521 522 523
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
524
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
S
Sujith 已提交
525
				needreset = 1;
S
Sujith 已提交
526
		}
527 528
	}

529
	__skb_queue_head_init(&bf_pending);
530

531
	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
S
Sujith 已提交
532
	while (bf) {
533 534
		u16 seqno = bf->bf_state.seqno;

535
		txfail = txpending = sendbar = 0;
S
Sujith 已提交
536
		bf_next = bf->bf_next;
537

538 539
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
540
		fi = get_frame_info(skb);
541

542 543
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
544 545 546 547 548 549
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
S
Sujith 已提交
550 551
			/* transmit completion, subframe is
			 * acked by block ack */
552
			acked_cnt++;
S
Sujith 已提交
553 554
		} else if (!isaggr && txok) {
			/* transmit completion */
555
			acked_cnt++;
556 557 558 559 560 561 562 563
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
S
Sujith 已提交
564
		} else {
565 566 567 568
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
S
Sujith 已提交
569
		}
570

571 572 573 574
		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
575
		INIT_LIST_HEAD(&bf_head);
576
		if (bf_next != NULL || !bf_last->bf_state.stale)
S
Sujith 已提交
577
			list_move_tail(&bf->list, &bf_head);
578

579
		if (!txpending) {
S
Sujith 已提交
580 581 582 583
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
584
			ath_tx_update_baw(sc, tid, seqno);
585

586
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
587
				memcpy(tx_info->control.rates, rates, sizeof(rates));
588
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
589 590 591
				rc_update = false;
			}

592
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
593
				!txfail);
S
Sujith 已提交
594
		} else {
595 596 597 598
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
S
Sujith 已提交
599
			/* retry the un-acked ones */
600
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
617
				}
618 619

				fi->bf = tbf;
S
Sujith 已提交
620 621 622 623 624 625
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
626
			__skb_queue_tail(&bf_pending, skb);
S
Sujith 已提交
627 628 629
		}

		bf = bf_next;
630 631
	}

632
	/* prepend un-acked frames to the beginning of the pending frame queue */
633
	if (!skb_queue_empty(&bf_pending)) {
634
		if (an->sleeping)
635
			ieee80211_sta_set_buffered(sta, tid->tidno, true);
636

637
		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
638
		if (!an->sleeping) {
639
			ath_tx_queue_tid(sc, txq, tid);
640

S
Sujith Manoharan 已提交
641
			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
642 643
				tid->ac->clear_ps_filter = true;
		}
644 645
	}

F
Felix Fietkau 已提交
646 647 648 649 650 651 652 653 654 655 656
	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

657 658
	rcu_read_unlock();

659 660
	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
S
Sujith 已提交
661
}
662

663 664 665 666 667 668 669 670 671 672
/* True for A-MPDU buffers that are not rate-control probe frames. */
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
673
	struct ieee80211_tx_info *info;
674 675 676 677 678 679 680 681 682 683 684
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
685 686 687 688
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
689
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
690
		}
691 692 693 694
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

695
	if (!flush)
696 697 698
		ath_txq_schedule(sc, txq);
}

699 700 701 702 703 704 705 706 707 708 709
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

710 711 712 713
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

714 715 716 717 718 719 720
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
721 722
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
723
{
S
Sujith 已提交
724 725
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
726
	struct ieee80211_tx_rate *rates;
S
Sujith 已提交
727
	u32 max_4ms_framelen, frmlen;
728
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
729
	int q = tid->ac->txq->mac80211_qnum;
S
Sujith 已提交
730
	int i;
S
Sujith 已提交
731

S
Sujith 已提交
732
	skb = bf->bf_mpdu;
S
Sujith 已提交
733
	tx_info = IEEE80211_SKB_CB(skb);
734
	rates = bf->rates;
S
Sujith 已提交
735

S
Sujith 已提交
736 737
	/*
	 * Find the lowest frame length among the rate series that will have a
738
	 * 4ms (or TXOP limited) transmit duration.
S
Sujith 已提交
739 740
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
S
Sujith 已提交
741

S
Sujith 已提交
742
	for (i = 0; i < 4; i++) {
743
		int modeidx;
S
Sujith 已提交
744

745 746
		if (!rates[i].count)
			continue;
747

748 749 750
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
751
		}
752 753 754 755 756 757 758 759 760

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

761
		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
762
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
763
	}
S
Sujith 已提交
764

765
	/*
S
Sujith 已提交
766 767 768
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
769
	 */
S
Sujith 已提交
770 771
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;
772

773 774 775 776 777 778 779 780
	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;
781

782 783
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);
784

S
Sujith 已提交
785 786
	return aggr_limit;
}
787

S
Sujith 已提交
788
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

862 863
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
864
			struct ath_atx_tid *tid, struct sk_buff_head **q)
865
{
866
	struct ieee80211_tx_info *tx_info;
867
	struct ath_frame_info *fi;
868
	struct sk_buff *skb;
869
	struct ath_buf *bf;
870
	u16 seqno;
871

872
	while (1) {
873 874 875 876
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

877
		skb = skb_peek(*q);
878 879 880
		if (!skb)
			break;

881 882
		fi = get_frame_info(skb);
		bf = fi->bf;
883
		if (!fi->bf)
F
Felix Fietkau 已提交
884
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
885 886
		else
			bf->bf_state.stale = false;
887

F
Felix Fietkau 已提交
888
		if (!bf) {
889
			__skb_unlink(skb, *q);
890
			ath_txq_skb_done(sc, txq, skb);
F
Felix Fietkau 已提交
891
			ieee80211_free_txskb(sc->hw, skb);
892
			continue;
F
Felix Fietkau 已提交
893
		}
894

895 896 897 898 899
		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
900 901 902 903 904 905 906 907 908

		/*
		 * No aggregation session is running, but there may be frames
		 * from a previous session or a failed attempt in the queue.
		 * Send them out as normal data frames
		 */
		if (!tid->active)
			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

909 910 911 912 913
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

914
		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
915
		seqno = bf->bf_state.seqno;
916

S
Sujith 已提交
917
		/* do not step over block-ack window */
918
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
S
Sujith 已提交
919
			break;
920

921 922 923 924 925 926
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
927
			__skb_unlink(skb, *q);
928 929 930 931 932
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

933 934 935 936 937 938
		return bf;
	}

	return NULL;
}

939 940 941 942 943
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
944 945
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
946
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
F
Felix Fietkau 已提交
947
	int nframes = 0, ndelim;
948
	u16 aggr_limit = 0, al = 0, bpad = 0,
F
Felix Fietkau 已提交
949
	    al_delta, h_baw = tid->baw_size / 2;
950 951 952
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
953
	bool closed = false;
954

955 956
	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);
957

958
	do {
959 960 961
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

S
Sujith 已提交
962
		/* do not exceed aggregation limit */
963
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
F
Felix Fietkau 已提交
964 965
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
966
			    ath_lookup_legacy(bf) || nframes >= h_baw)
F
Felix Fietkau 已提交
967
				break;
968

F
Felix Fietkau 已提交
969
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
970 971
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
F
Felix Fietkau 已提交
972
				break;
S
Sujith 已提交
973
		}
974

S
Sujith 已提交
975
		/* add padding for previous frame to aggregation length */
S
Sujith 已提交
976
		al += bpad + al_delta;
977

S
Sujith 已提交
978 979 980 981
		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
982 983
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
S
Sujith 已提交
984
		bpad = PADBYTES(al_delta) + (ndelim << 2);
985

986
		nframes++;
S
Sujith 已提交
987
		bf->bf_next = NULL;
988

S
Sujith 已提交
989
		/* link buffers of this frame to the aggregate */
990 991
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
992
		bf->bf_state.ndelim = ndelim;
993

994
		__skb_unlink(skb, tid_q);
995
		list_add_tail(&bf->list, bf_q);
996
		if (bf_prev)
S
Sujith 已提交
997
			bf_prev->bf_next = bf;
998

S
Sujith 已提交
999
		bf_prev = bf;
S
Sujith 已提交
1000

1001 1002 1003 1004 1005
		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
1006
	} while (ath_tid_has_buffered(tid));
1007

1008 1009 1010 1011 1012 1013 1014 1015 1016 1017
	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

1018
	*aggr_len = al;
S
Sujith 已提交
1019

1020
	return closed;
S
Sujith 已提交
1021 1022
#undef PADBYTES
}
1023

1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	int streams = HT_RC_2_STREAMS(rix);
	u32 bit_count, bits_per_sym, sym_count, usec;

	/* number of OFDM symbols needed for PLCP header + payload */
	bit_count = (pktlen << 3) + OFDM_PLCP_BITS;
	bits_per_sym = bits_per_symbol[rix % 8][width] * streams;
	sym_count = (bit_count + bits_per_sym - 1) / bits_per_sym;

	usec = half_gi ? SYMBOL_TIME_HALFGI(sym_count) : SYMBOL_TIME(sym_count);

	/* add the duration of the legacy/HT training and signal fields */
	usec += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return usec;
}

1053 1054 1055 1056 1057 1058
/*
 * Return the largest frame length (in bytes) that fits into the given
 * airtime budget (usec) at the given MCS/bandwidth/guard-interval,
 * capped at 65532 bytes.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int streams = HT_RC_2_STREAMS(mcs);
	int nsymbols, nbits, nbytes;

	/* subtract the fixed preamble/training overhead from the budget */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	if (sgi)
		nsymbols = TIME_SYMBOLS_HALFGI(usec);
	else
		nsymbols = TIME_SYMBOLS(usec);

	nbits = nsymbols * bits_per_symbol[mcs % 8][ht40] * streams;
	nbits -= OFDM_PLCP_BITS;

	nbytes = nbits / 8;
	if (nbytes > 65532)
		nbytes = 65532;

	return nbytes;
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * 32 MCS indices and all four HT20/HT40 x GI combinations, based on the
 * queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	for (mcs = 0; mcs < 32; mcs++) {
		sc->tx.max_aggr_framelen[queue][MCS_HT20][mcs] =
			ath_max_framelen(txop, mcs, false, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI][mcs] =
			ath_max_framelen(txop, mcs, false, true);
		sc->tx.max_aggr_framelen[queue][MCS_HT40][mcs] =
			ath_max_framelen(txop, mcs, true, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI][mcs] =
			ath_max_framelen(txop, mcs, true, true);
	}
}

1091
/*
 * Fill in the rate series of the hardware TX descriptor info from the
 * mac80211 rate table attached to the buffer.  Handles MCS vs legacy
 * rates, RTS/CTS protection flags, channel width, guard interval and
 * per-series packet duration.  "len" is the frame (or aggregate) length
 * used for duration and RTS-threshold decisions.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip unused rate-table entries */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: bit 7 marks the rate as HT for the hw */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC is only valid for single-stream (MCS 0-7) rates */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames go out on all chains */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1198

1199 1200 1201 1202 1203 1204 1205 1206
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1207

1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1220 1221
}

1222 1223
/*
 * Walk the bf_next chain starting at "bf" and program a hardware TX
 * descriptor for every buffer.  The first subframe of each aggregate
 * carries the rate series and per-frame flags; subsequent subframes get
 * AGGR_BUF_MIDDLE/LAST markers.  "len" is the total A-MPDU length (only
 * meaningful for aggregates; for single frames it is replaced by the
 * frame's own length).
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			/* tx99 mode loops the descriptor back on itself */
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			/* first subframe: set up flags and the rate series */
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* reached the end of this aggregate; restart flag setup */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349
/*
 * Pull up to two non-A-MPDU subframes off the tid queue starting with
 * bf_first and chain them onto bf_q as a short burst.  Stops early when
 * the tid runs dry or the next frame is flagged for A-MPDU.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *cur = bf_first, *prev = NULL;
	int count = 0;

	for (;;) {
		struct ieee80211_tx_info *tx_info;

		/* move the frame from the tid queue onto the burst list */
		__skb_unlink(cur->bf_mpdu, tid_q);
		list_add_tail(&cur->list, bf_q);
		if (prev)
			prev->bf_next = cur;
		prev = cur;

		if (++count >= 2)
			break;

		cur = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!cur)
			break;

		tx_info = IEEE80211_SKB_CB(cur->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, cur);
	}
}

1350 1351
/*
 * Try to send one aggregate (or short burst) for the given tid on the
 * given hardware queue.  Returns true if frames were queued to the
 * hardware; sets *stop when the queue is already deep enough and the
 * scheduler should back off.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr, last = true;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	/* back off if the hw queue already holds enough frames of this kind */
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
					tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* ask the hw to clear a stale PS filter state on the first frame */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1397 1398
/*
 * mac80211 callback: start a TX BlockAck session for (sta, tid).
 * Refreshes the A-MPDU parameters, activates the tid and resets its
 * BlockAck window; *ssn returns the starting sequence number.
 * Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the BlockAck window tracking state */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1436

1437
/*
 * mac80211 callback: tear down the TX BlockAck session for (sta, tid).
 * Deactivates the tid and flushes frames tracked in its BlockAck window.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1449

1450 1451
/*
 * Station entered powersave: unschedule every tid of this node and tell
 * mac80211 which tids still have frames buffered in the driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* nothing to do if this tid was never scheduled */
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

		buffered = ath_tid_has_buffered(tid);

		/* remove the tid (and its empty AC) from the scheduler lists */
		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		/* notify mac80211 outside the txq lock */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station left powersave: re-queue every tid that still has buffered
 * frames and kick the corresponding hardware queue scheduler.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		/* first frame after wakeup must clear the hw PS filter */
		ac->clear_ps_filter = true;

		if (ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(sc, txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1513 1514
/*
 * Resume aggregation on (sta, tidno) after a BA session becomes
 * operational: refresh the BlockAck window size from the peer's A-MPDU
 * factor and reschedule the tid if frames are pending.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(sc, txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547
/*
 * mac80211 callback for U-APSD: release up to nframes buffered frames
 * for the tids in the "tids" bitmask, chain them together, mark the last
 * one with EOSP and push the chain onto the dedicated uapsd queue.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			/* released frames go out unaggregated but stay in the BAW */
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* mark end-of-service-period on the last released frame */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1601 1602 1603
/********************/
/* Queue Management */
/********************/
1604

S
Sujith 已提交
1605
/*
 * Allocate and initialize a hardware TX queue of the given type/subtype.
 * Returns a pointer into sc->tx.txq[], or NULL if the hardware has no
 * free queue of that type (a normal condition on some parts).
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		/* first time this hw queue is used: initialize sw state */
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1677 1678 1679
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst/ready time) for
 * the given hardware queue.  Returns 0 on success, -EIO if the hardware
 * rejects the new properties.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read-modify-write: preserve properties not covered by qinfo */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		/* new properties take effect after a queue reset */
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Update the CAB (content-after-beacon) queue ready time as a percentage
 * of the beacon interval.  Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1719
/*
 * Flush every buffer on "list", completing each frame (and its
 * subframes) with ATH9K_TX_FLUSH status.  Stale buffers are simply
 * returned to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* detach the whole frame (first..last subframe) at once */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1745

1746 1747 1748 1749 1750 1751
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA chips: empty each FIFO slot starting at the tail */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1774
/*
 * Abort TX DMA and drain every configured hardware queue.  Returns true
 * if all queues stopped cleanly, false if some queue still had pending
 * DMA after the abort.
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1818

S
Sujith 已提交
1819
/* Release a hardware TX queue and clear its bit in the setup mask. */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
1824

1825
/* For each acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	struct list_head *ac_list;
	bool sent = false;

	if (txq->mac80211_qnum < 0)
		return;

	spin_lock_bh(&sc->chan_lock);
	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
	spin_unlock_bh(&sc->chan_lock);

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(ac_list))
		return;

	spin_lock_bh(&sc->chan_lock);
	rcu_read_lock();

	/* remember the current tail so one full round-robin pass is bounded */
	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
	while (!list_empty(ac_list)) {
		bool stop = false;

		if (sc->cur_chan->stopped)
			break;

		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(sc, txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* re-queue the AC if it still has pending tids */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, ac_list);
		}

		if (stop)
			break;

		/* end of one full pass: continue only if progress was made */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(ac_list->prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
	spin_unlock_bh(&sc->chan_lock);
}
1904

1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918
/* Run the TX scheduler on every data queue mapped to a mac80211 AC. */
void ath_txq_schedule_all(struct ath_softc *sc)
{
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ath_txq *txq = sc->tx.txq_map[ac];

		spin_lock_bh(&txq->axq_lock);
		ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1919 1920 1921 1922
/***********/
/* TX, DMA */
/***********/

1923
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * On EDMA hardware the chain goes into the next free FIFO slot;
 * on legacy hardware it is appended to the queue's descriptor list.
 * When "internal" is false, software queue-depth accounting is updated
 * and the per-frame bf_next links are dissolved.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* free FIFO slot available: hand the chain to it directly */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* append by linking to the previous tail descriptor */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1993

F
Felix Fietkau 已提交
1994
/*
 * Queue a single (non-aggregated) frame to the hardware.  If the frame
 * belongs to an A-MPDU-capable tid it is still tracked in the tid's
 * BlockAck window.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}

	/* single-frame "chain": the buffer is its own last subframe */
	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

2017 2018 2019
/*
 * Initialize the per-frame ath_frame_info stored in the skb's driver area.
 *
 * Records the frame length, hardware crypto key index/type and the
 * RTS/CTS rate to use for this frame.  Must be called before the frame
 * is handed to the descriptor setup path.
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	/* fi aliases tx_info->control/rate data, so the pointers read above
	 * (hw_key, vif, sta) become invalid after this memset. */
	memset(fi, 0, sizeof(*fi));
	fi->txq = -1;
	/* Key selection priority: explicit hw key, then the station's
	 * power-save key for data frames, else no hw encryption. */
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;

	/* No RTS/CTS protection rate applicable for this frame. */
	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

2064 2065 2066 2067
/*
 * Return the TX chainmask to use for the given rate, reducing the
 * requested mask where the hardware requires it:
 *
 *  - APM-capable hardware on a 5 GHz channel transmits rates below 0x90
 *    on two chains (0x3) when all three chains (0x7) were requested.
 *  - AR9462 with bluetooth coexistence enabled sends CCK rates on a
 *    single chain (0x2).
 *  - Otherwise the requested chainmask is returned unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *hw = sc->sc_ah;
	struct ath9k_channel *chan = hw->curchan;

	if ((hw->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(chan) &&
	    chainmask == 0x7 && rate < 0x90)
		return 0x3;

	if (AR_SREV_9462(hw) && ath9k_hw_btcoex_is_enabled(hw) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2079 2080 2081 2082
/*
 * Assign a TX buffer/descriptor for a frame (and a sequence number, if
 * necessary) and map the frame for DMA.
 *
 * Returns NULL if no buffer is available or DMA mapping fails; the skb
 * is not consumed on error — callers free it when NULL is returned.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	/* Data frames on an aggregation TID get the driver-maintained
	 * sequence number; the fragment number is preserved. */
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* Advance seq_next only once per MSDU, i.e. on the last
		 * fragment. */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Undo the partial setup so the buffer can be safely
		 * returned to the free pool. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2135 2136
/*
 * Common TX preparation shared by ath_tx_start() and ath_tx_cabq():
 * resolve the destination node, compute the on-air frame length,
 * optionally assign a sequence number, insert the 4-byte header
 * padding the hardware expects, and fill in the per-frame info.
 *
 * Returns 0 on success or -ENOMEM if there is no headroom for padding.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		/* Station-less data frames go through the vif's multicast
		 * node. */
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}

	/* The crypto ICV is appended by hardware but counts toward the
	 * on-air length used for duration/rate calculations. */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Non-AP interfaces and non-data frames always clear the PS
	 * filter so they are not held back by powersave filtering. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2190

2191 2192 2193 2194 2195 2196 2197 2198
/*
 * Main mac80211 TX entry point for data/management frames.
 *
 * Prepares the frame, applies per-queue flow control, and either queues
 * it on a TID software queue for later aggregation or transmits it
 * directly via ath_tx_send_normal().  PS-response and off-channel
 * frames are redirected to the UAPSD queue.
 *
 * Returns 0 on success or a negative errno from ath_tx_prepare().
 * Upon failure caller should free skb.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_vif *avp = NULL;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	bool queue;
	int q, hw_queue;
	int ret;

	if (vif)
		avp = (void *)vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txctl->force_channel = true;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	/* ath_tx_prepare() may have shifted the header for padding. */
	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);
	hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;

	ath_txq_lock(sc, txq);
	/* Per-queue flow control: stop the mac80211 queue once too many
	 * frames are pending on the mapped hardware queue. */
	if (txq == sc->tx.txq_map[q]) {
		fi->txq = q;
		if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
		    !txq->stopped) {
			ieee80211_stop_queue(sc->hw, hw_queue);
			txq->stopped = true;
		}
	}

	queue = ieee80211_is_data_present(hdr->frame_control);

	/* Force queueing of all frames that belong to a virtual interface on
	 * a different channel context, to ensure that they are sent on the
	 * correct channel.
	 */
	if (((avp && avp->chanctx != sc->cur_chan) ||
	     sc->cur_chan->stopped) && !txctl->force_channel) {
		if (!txctl->an)
			txctl->an = &avp->mcast_node;
		info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE;
		queue = true;
	}

	if (txctl->an && queue)
		tid = ath_get_skb_tid(sc, txctl->an, skb);

	if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE |
			   IEEE80211_TX_CTL_TX_OFFCHAN)) {
		/* Switch to the UAPSD queue; drop/re-take locks in order. */
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	} else if (txctl->an && queue) {
		WARN_ON(tid->ac->txq != txctl->txq);

		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(sc, txq, tid);

		ath_txq_schedule(sc, txq);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		/* Buffer setup failed: undo flow-control accounting and
		 * free the skb here (paprd frames are driver-internal). */
		ath_txq_skb_done(sc, txq, skb);
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
/*
 * Transmit buffered broadcast/multicast frames on the CAB (content after
 * beacon) queue.
 *
 * Drains mac80211's buffered-broadcast queue into a chained buffer list,
 * capping the total airtime at a fraction of the beacon/DTIM interval,
 * clears the MOREDATA bit on the last batch and pushes the chain to the
 * cabq.
 */
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	/* Budget: beacon interval (us) times DTIM period, split across
	 * the beacon slots. */
	max_duration =
		sc->cur_chan->beacon.beacon_interval * 1000 *
		sc->cur_chan->beacon.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);

		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		/* skb ownership transferred to bf; clear so a break here
		 * does not double-free below. */
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	/* A non-NULL skb here was fetched but never queued. */
	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	/* NOTE(review): frame_control is __le16 but IEEE80211_FCTL_MOREDATA
	 * is a CPU-order constant — verify this test on big-endian hosts. */
	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		/* Header was modified after DMA mapping; sync it back. */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2371 2372 2373
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2374

S
Sujith 已提交
2375
/*
 * Hand a completed frame back to mac80211.
 *
 * Sets the ACK status flag, strips the driver-inserted header padding,
 * updates the powersave state machine if this was the frame we were
 * waiting to be ACKed, and queues the skb on the txq's completion list.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* Powersave state is protected by sc_pm_lock. */
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}
2419

S
Sujith 已提交
2420
/*
 * Finish a transmitted ath_buf: unmap its DMA buffer, dispose of the
 * skb (mac80211 completion, PAPRD calibration signalling, or nothing
 * for tx99), and return the buffer chain to the free pool.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;
	/* tx99 test mode: frames are not reported back to mac80211. */
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD calibration frame: free it if the waiter timed
		 * out, otherwise wake the waiting calibration thread. */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2465 2466
/*
 * Translate the hardware TX status into mac80211 rate-control feedback
 * for this frame/aggregate: ACK RSSI, A-MPDU length accounting, retry
 * counts, and invalidation of the unused rate table entries.
 *
 * @nframes: number of frames in the (possibly single-frame) aggregate
 * @nbad:    number of those frames that failed
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Mark the rate entries after the one actually used as unused. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2520
/*
 * Reap completed descriptors from a legacy (non-EDMA) hardware queue.
 *
 * Walks the queue's descriptor list, collecting each completed transmit
 * unit (first..last buffer of an aggregate) and handing it to
 * ath_tx_process_buffer().  The last completed descriptor is kept on
 * the queue as a holding descriptor (see race comment below).
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		/* Abort processing while a hardware reset is in flight. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_state.stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Descriptor not yet completed by hardware. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_state.stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be freed. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2595
void ath_tx_tasklet(struct ath_softc *sc)
2596
{
2597 2598
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2599
	int i;
2600

S
Sujith 已提交
2601 2602 2603
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2604 2605 2606
	}
}

2607 2608
/*
 * TX completion bottom half for EDMA (AR93xx+) hardware.
 *
 * Pops status entries from the global TX status ring; beacon queue
 * completions are handled inline, all other completions are matched to
 * the head of the corresponding queue's TX FIFO and passed to
 * ath_tx_process_buffer().
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		/* Abort while a hardware reset is in flight. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			if (ath9k_is_chanctx_enabled()) {
				ath_chanctx_event(sc, NULL,
						  ATH_CHANCTX_EVENT_BEACON_SENT);
			}

			ath9k_csa_update(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);
		/* Drop a leftover holding descriptor at the FIFO head. */
		if (bf->bf_state.stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* Whole FIFO slot completed: take it all and advance
			 * the tail index. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			/* Push frames that were waiting for a free FIFO
			 * slot into the hardware. */
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Partial slot: keep the last buffer as the holding
			 * descriptor. */
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2690 2691 2692
/*****************/
/* Init, Cleanup */
/*****************/
2693

2694 2695 2696 2697 2698 2699
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2700 2701
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up EDMA TX status handling: allocate the status ring and point
 * the hardware at it.  Returns 0 or a negative errno.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2721
int ath_tx_init(struct ath_softc *sc, int nbufs)
2722
{
2723
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2724
	int error = 0;
2725

2726
	spin_lock_init(&sc->tx.txbuflock);
2727

2728
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2729
				  "tx", nbufs, 1, 1);
2730
	if (error != 0) {
2731 2732
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2733
		return error;
2734
	}
2735

2736
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2737
				  "beacon", ATH_BCBUF, 1, 1);
2738
	if (error != 0) {
2739 2740
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2741
		return error;
2742
	}
2743

2744 2745
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2746
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2747
		error = ath_tx_edma_init(sc);
2748

S
Sujith 已提交
2749
	return error;
2750 2751 2752 2753
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2754 2755 2756
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2757

2758
	for (tidno = 0, tid = &an->tid[tidno];
2759
	     tidno < IEEE80211_NUM_TIDS;
2760 2761 2762 2763 2764 2765 2766
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
2767
		tid->active	   = false;
2768
		__skb_queue_head_init(&tid->buf_q);
2769
		__skb_queue_head_init(&tid->retry_q);
2770
		acno = TID_TO_WME_AC(tidno);
2771
		tid->ac = &an->ac[acno];
2772
	}
2773

2774
	for (acno = 0, ac = &an->ac[acno];
2775
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2776
		ac->sched    = false;
2777
		ac->clear_ps_filter = true;
2778
		ac->txq = sc->tx.txq_map[acno];
2779
		INIT_LIST_HEAD(&ac->tid_q);
2780 2781 2782
	}
}

S
Sujith 已提交
2783
/*
 * Tear down the per-station TX state when a node goes away: unschedule
 * every TID and its access category, and drop all frames still pending
 * on the TID software queues.  Each TID is handled under its queue's
 * lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Remove the TID from its AC's schedule list. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Remove the AC from the queue's schedule list. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		/* Discard any frames still queued on this TID. */
		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}
L
Luis R. Rodriguez 已提交
2814

2815 2816
#ifdef CONFIG_ATH9K_TX99

L
Luis R. Rodriguez 已提交
2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858
/*
 * Transmit a frame in tx99 (continuous transmit test) mode.
 *
 * Pads the 802.11 header to a 4-byte boundary, fills in minimal frame
 * info (no encryption), sets up a TX buffer, self-links its descriptor
 * so the hardware retransmits it continuously, and starts tx99 on the
 * target queue.
 *
 * Returns 0 on success or -EINVAL if padding or buffer setup fails
 * (the skb is not consumed on error).
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			/* Fix: indent the error return at its true nesting
			 * level (it only runs when headroom is short). */
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames are sent in the clear. */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Self-link the descriptor so hardware loops on this frame. */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}
2859 2860

#endif /* CONFIG_ATH9K_TX99 */