xmit.c 70.8 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
32 33
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
34 35 36 37
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
static u16 bits_per_symbol[][2] = {
39 40 41 42 43 44 45 46 47 48 49
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55
				struct ath_txq *txq, struct list_head *bf_q,
56
				struct ath_tx_status *ts, int txok);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
67
					   struct sk_buff *skb);
68

69
enum {
70 71
	MCS_HT20,
	MCS_HT20_SGI,
72 73 74 75
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
/* Acquire the TX queue lock (BH-disabling; TX path runs in softirq context). */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

86
/* Release the TX queue lock without flushing the completion queue. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

92
/*
 * Release the TX queue lock and hand all frames queued on txq->complete_q
 * to mac80211.  The frames are spliced onto a private list first so that
 * ieee80211_tx_status() is called without holding axq_lock (it may re-enter
 * the driver).
 */
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

106 107
/*
 * Queue a TID (and, if needed, its access category) for transmission
 * scheduling on the channel context's AC list.  Both tid->sched and
 * ac->sched guard against double-insertion into the scheduler lists.
 * No-op when the vif has no channel context or the TID is already queued.
 */
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;
	struct list_head *list;
	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
	struct ath_chanctx *ctx = avp->chanctx;

	if (!ctx)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;

	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
	list_add_tail(&ac->list, list);
}
131

132
/*
 * Return the driver-private per-frame state, stored inside the skb's
 * mac80211 rate_driver_data scratch area.  The BUILD_BUG_ON proves at
 * compile time that ath_frame_info fits in that area.
 */
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

140 141
/*
 * Send a BlockAckReq for this TID advertising @seqno as the new window
 * start.  Skipped for nodes without an associated station entry.
 */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	struct ath_node *an = tid->an;

	if (!an->sta)
		return;

	ieee80211_send_bar(an->vif, an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

149 150 151 152 153 154 155
/* Fill bf->rates with the mac80211 rate-control selection for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177
/*
 * Account the completion of one frame against its software queue and
 * wake the corresponding mac80211 queue once the backlog drops below
 * the per-queue limit again.  Frames completed on the UAPSD queue are
 * charged to the queue they were originally mapped to.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int q = skb_get_queue_mapping(skb);

	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	/* Only count frames against the queue they belong to. */
	if (txq != sc->tx.txq_map[q])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (!txq->stopped)
		return;

	if (txq->pending_frames < sc->tx.txq_max_pending[q]) {
		ieee80211_wake_queue(sc->hw, q);
		txq->stopped = false;
	}
}

178 179 180
/* Map an skb to its per-node TID state using the QoS priority field. */
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	return ATH_AN_2_TID(an, tidno);
}

185 186
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
187
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
188 189 190 191
}

/*
 * Pop the next frame for this TID.  Frames awaiting retransmission take
 * priority over fresh frames to preserve in-order delivery.
 */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *frame = __skb_dequeue(&tid->retry_q);

	if (frame)
		return frame;

	return __skb_dequeue(&tid->buf_q);
}

201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 *
 * Walks the TID's buffered frames; frames that do not yet have a DMA
 * buffer get one allocated (which also assigns a sequence number), and
 * frames that cannot be set up are dropped and completed to mac80211.
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* Frame already has a buffer (and thus a seqno) - keep it. */
		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* Setup failed: drop the frame and account for it. */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}

}

236
/*
 * Drop every frame on the TID's retry queue, completing each with a
 * zeroed (failed) TX status.  Frames that were inside the BlockAck
 * window are removed from it; if any were, a BAR is sent afterwards so
 * the peer moves its window forward.  Called with the txq lock held;
 * the lock is dropped around ath_send_bar().
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->retry_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!bf) {
			/* No DMA buffer was ever attached - just free. */
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		if (fi->baw_tracked) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			sendbar = true;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	if (sendbar) {
		/* ath_send_bar() must not be called under the txq lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
274

S
Sujith 已提交
275 276
/*
 * Mark @seqno as completed in the TID's BlockAck window bitmap and slide
 * the window start (seq_start/baw_head) forward past any leading run of
 * completed entries.  bar_index tracks a pending BAR position and must
 * shift with the window.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	/* index: offset of seqno from the window start; cindex: bitmap slot */
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* Advance the window while the head-of-window frame is complete */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
292

S
Sujith 已提交
293
/*
 * Add a frame's sequence number to the TID's BlockAck window bitmap and
 * flag it as tracked.  If the frame lands at or beyond the current tail,
 * the tail is advanced to just past it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);
	fi->baw_tracked = 1;

	/* Grow the window tail if this frame extends past it */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

S
Sujith 已提交
312 313
/*
 * Drop every frame buffered for the TID (retry queue first, then the
 * normal queue), completing each back to mac80211 with an error/failed
 * status.  Unlike ath_tx_flush_tid(), no BAW update or BAR is done here.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = ath_tid_dequeue(tid))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			/* No DMA buffer attached - complete as plain error */
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}

S
Sujith 已提交
339
/*
 * Record @count software retries for a frame.  On the first retry the
 * 802.11 Retry bit is set in the header, and since the frame is already
 * DMA-mapped, the modified header bytes must be synced back to the
 * device before retransmission.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* Retry bit already set on an earlier retry - nothing more to do */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

359
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
360
{
361
	struct ath_buf *bf = NULL;
S
Sujith 已提交
362 363

	spin_lock_bh(&sc->tx.txbuflock);
364 365

	if (unlikely(list_empty(&sc->tx.txbuf))) {
366 367 368
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
369 370 371 372

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
373 374
	spin_unlock_bh(&sc->tx.txbuflock);

375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
	return bf;
}

/* Give an ath_buf back to the free-buffer pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate a tx buffer for retransmission: copy the mpdu pointer, DMA
 * address, descriptor contents and software state into a fresh ath_buf.
 * The clone is marked non-stale.  Returns NULL (with a WARN) if the
 * free-buffer pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_state.stale = false;

	return tbf;
}

404 405 406 407
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
408
	struct ath_frame_info *fi;
409 410 411 412 413 414 415 416 417 418 419 420 421 422 423
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
424
		fi = get_frame_info(bf->bf_mpdu);
425
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
426 427 428 429 430 431 432 433 434 435

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
436 437
/*
 * Process the TX status of an A-MPDU (or single subframe of one).
 *
 * Walks the subframe chain: subframes acked by the BlockAck (or a plain
 * ack for non-aggregates) are completed as successful; un-acked ones are
 * either scheduled for software retry (kept in order via a temporary
 * pending queue that is spliced back onto tid->retry_q) or failed once
 * ATH_MAX_SW_RETRIES is exhausted, in which case a BAR is sent to move
 * the receiver's window.  Called with the txq lock held; the lock is
 * dropped around ath_send_bar().
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* total retries = long retries of the final rate + all earlier rates */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* Station is gone: fail-complete the whole chain */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* software retries exhausted: give up on this one */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(sc, txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* BAR transmission must happen without the txq lock held */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
661

662 663 664 665 666 667 668 669 670 671
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Common completion path for one hardware-reported frame: update queue
 * depth accounting, then dispatch to the single-frame or aggregate
 * completion handler.  Unless this is a queue flush, the rate-control
 * status is updated and the txq is rescheduled afterwards.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

698 699 700 701 702 703 704 705 706 707 708
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

709 710 711 712
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

713 714 715 716 717 718 719
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
720 721
/*
 * Compute the maximum aggregate length (in bytes) for a frame, based on
 * the slowest rate in its rate series.  Returns 0 when aggregation should
 * be avoided entirely (legacy rate present, or rate-control probe).  The
 * result is further capped by BTCOEX limits and the peer's maximum
 * advertised A-MPDU size.
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* Select the precomputed framelen table: HT20/HT40 x GI */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
786

S
Sujith 已提交
787
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Pad short subframes with extra delimiters to reach minlen */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

861 862
/*
 * Fetch the next transmittable frame for a TID without dequeuing it.
 * Returns the frame's ath_buf (allocating one if needed) and reports via
 * @q which software queue (retry_q or buf_q) the frame sits on.  Frames
 * past the BlockAck window stop the search; frames superseded by a
 * pending BAR are dropped and completed as failed.  Returns NULL when
 * nothing is transmittable.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		/* Retries are serviced before new frames */
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		else
			bf->bf_state.stale = false;

		if (!bf) {
			/* Buffer setup failed: drop the frame and keep going */
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			/* Frame is before a pending BAR: fail it out */
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		return bf;
	}

	return NULL;
}

929 930 931 932 933
/*
 * Build one A-MPDU starting at @bf_first: link subframes into bf_q until
 * the rate-derived length limit, half the BlockAck window, or a frame
 * that must not be aggregated stops the chain.  Each subframe is added
 * to the BAW and gets its delimiter count.  The total aggregate length
 * is returned through @aggr_len; the return value is true when the TID
 * ran out of transmittable frames ("closed").
 */
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	bool closed = false;

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	do {
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
			    ath_lookup_legacy(bf) || nframes >= h_baw)
				break;

			/* Probe frames and non-AMPDU frames end the chain */
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
				break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
	} while (ath_tid_has_buffered(tid));

	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	/* A single-subframe "aggregate" is sent as a plain A-MPDU frame */
	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

	*aggr_len = al;

	return closed;
#undef PADBYTES
}
1013

1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 *
 * Returns the HT PPDU airtime in microseconds: data symbols plus the
 * legacy and HT preamble/training fields.
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1043 1044 1045 1046 1047 1048
/*
 * Inverse of ath_pkt_duration(): given a time budget in microseconds,
 * return the maximum frame length (bytes) that fits at the given MCS,
 * channel width and guard interval.  Capped at 65532, just below the
 * maximum A-MPDU length.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int streams = HT_RC_2_STREAMS(mcs);
	int symbols, bits;
	int bytes = 0;

	/* subtract the fixed preamble/training overhead first */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
	symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
	bits -= OFDM_PLCP_BITS;
	bytes = bits / 8;
	if (bytes > 65532)
		bytes = 65532;

	return bytes;
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * 32 MCS indices and all four width/guard-interval combinations, based
 * on the queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *ht20, *ht20_sgi, *ht40, *ht40_sgi;
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];

	for (mcs = 0; mcs < 32; mcs++) {
		ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1081
/*
 * Translate the mac80211 rate series attached to a buffer into the
 * hardware rate table in @info: per-try counts, RTS/CTS protection
 * flags, chainmask selection and packet duration for each series.
 *
 * @len: frame length used for duration/RTS-threshold decisions (the
 *       whole A-MPDU length for aggregates).
 * @rts: force RTS protection on every series when true.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip empty/invalid rate series */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: bit 7 marks an HT rate to the hardware */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC is only valid for single-stream (MCS 0-7) rates */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames must go out on all chains */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1188

1189 1190 1191 1192 1193 1194 1195 1196
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1197

1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1210 1211
}

1212 1213
/*
 * Fill the hardware TX descriptors for a chain of buffers (linked via
 * bf_next).  Rate/flag setup is done once per (sub)aggregate on its
 * first buffer; per-buffer fields (DMA address, length, keys, aggregate
 * position) are written for every descriptor.
 *
 * @len: total A-MPDU length for aggregates; ignored (replaced by the
 *       per-frame length) for non-aggregated buffers.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			/* tx99 mode loops the last descriptor back to itself */
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* mark this buffer's position within the aggregate */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* start a fresh first-buffer pass after each sub-aggregate */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339
/*
 * Form a short burst (at most two frames) of non-aggregated frames for
 * transmission: unlink them from the TID's software queue, chain them
 * via bf_next and append them to @bf_q.  Stops early when the next
 * pending frame wants A-MPDU treatment.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	struct sk_buff *skb;
	int nframes = 0;

	do {
		struct ieee80211_tx_info *tx_info;
		skb = bf->bf_mpdu;

		nframes++;
		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;
		bf_prev = bf;

		/* burst is limited to two frames */
		if (nframes >= 2)
			break;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf)
			break;

		/* A-MPDU-eligible frames are left for the aggregation path */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, bf);
	} while (1);
}

1340 1341
/*
 * Try to queue pending frames of a TID to the hardware, either as an
 * A-MPDU aggregate or as a short non-aggregated burst.
 *
 * Returns true if anything was handed to the hardware.  Sets *stop when
 * the target queue is already deep enough and the caller should stop
 * scheduling further TIDs.
 *
 * Fix: drop the local 'last' variable — it captured the return value of
 * ath_tx_form_aggr() but was never read afterwards (dead store).
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	/* back off once the hardware queue is sufficiently deep */
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
				 tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* propagate the PS filter-clear request into the first frame */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1387 1388
/*
 * mac80211 callback: start a TX BA (aggregation) session for @tid.
 * Refreshes the peer's A-MPDU parameters, activates the TID and resets
 * its block-ack window state.  Returns the starting sequence number
 * through @ssn.  Always succeeds (returns 0).
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the block-ack window tracking */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1426

1427
/*
 * mac80211 callback: tear down the TX BA session for @tid.  Deactivates
 * the TID, flushes frames still tracked in its block-ack window and
 * re-evaluates the state of any remaining pending frames.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1439

1440 1441
/*
 * Station entered power-save: pull every scheduled TID (and its AC, if
 * it becomes empty) off the scheduling lists and tell mac80211 whether
 * each TID still has frames buffered in the driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* TIDs that are not scheduled need no list surgery */
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

		buffered = ath_tid_has_buffered(tid);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		/* report outside the txq lock */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station woke from power-save: request a PS-filter clear on the next
 * transmission and reschedule every TID that still has buffered frames.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(sc, txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1503 1504
/*
 * Resume an established TX BA session after it was operational again
 * (mac80211 IEEE80211_AMPDU_TX_OPERATIONAL): refresh the block-ack
 * window size from the peer's capabilities and kick scheduling if
 * frames are pending.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(sc, txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
/*
 * mac80211 callback (U-APSD service period): release up to @nframes
 * buffered frames for the TIDs in the @tids bitmask onto the UAPSD
 * hardware queue.  The last released frame is tagged EOSP so the
 * hardware/stack can close the service period.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	/* walk the TID bitmask, lowest TID first */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				/* released frames go out unaggregated */
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* mark end-of-service-period on the last released frame */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1591 1592 1593
/********************/
/* Queue Management */
/********************/
1594

S
Sujith 已提交
1595
/*
 * Allocate and initialize a hardware transmit queue of the given type
 * and subtype (access category).  Returns a pointer into sc->tx.txq[]
 * on success, or NULL when the hardware has no queue to spare.  The
 * software side of the queue is initialized only the first time a given
 * hardware queue number is handed out.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA FIFO bookkeeping */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1667 1668 1669
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst/ready time) for
 * hardware queue @qnum.  On success the queue is reset so the new
 * parameters take effect; returns 0 or -EIO on hardware failure.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read-modify-write: keep whatever fields we don't override */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Re-program the CAB (content-after-beacon) queue's ready time as a
 * percentage (ATH_CABQ_READY_TIME) of the current beacon interval.
 * Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1709
/*
 * Complete every buffer on @list with a flush status.  Stale buffers
 * (already completed once) are simply returned to the free pool; all
 * others are processed frame-by-frame through the normal completion
 * path.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* cut one whole frame (first..last subframe) off the list */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1735

1736 1737 1738 1739 1740 1741
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA chips track frames in per-queue FIFO slots as well */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1764
/*
 * Abort TX DMA and drain every configured hardware queue.
 *
 * Returns true if all queues stopped cleanly, false if some queue still
 * had DMA pending after the abort (which is also logged).
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* nothing to do if the device is already torn down */
	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1808

S
Sujith 已提交
1809
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1810
{
S
Sujith 已提交
1811 1812
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1813
}
1814

1815
/* For each acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 *
 * Walks the per-channel-context AC list round-robin; within each AC it
 * walks TIDs round-robin.  Stops when a queue-depth limit is hit, when
 * the channel context is stopped, or when a full pass over the ACs
 * produced no transmissions.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	struct list_head *ac_list;
	bool sent = false;

	if (txq->mac80211_qnum < 0)
		return;

	spin_lock_bh(&sc->chan_lock);
	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
	spin_unlock_bh(&sc->chan_lock);

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(ac_list))
		return;

	spin_lock_bh(&sc->chan_lock);
	rcu_read_lock();

	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
	while (!list_empty(ac_list)) {
		bool stop = false;

		if (sc->cur_chan->stopped)
			break;

		/* take the AC off the list; it re-queues itself if needed */
		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(sc, txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, ac_list);
		}

		if (stop)
			break;

		/* full round-robin pass completed: stop if nothing was sent */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(ac_list->prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
	spin_unlock_bh(&sc->chan_lock);
}
1894

1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908
/*
 * Kick the scheduler on every access-category data queue, taking each
 * queue's lock for the duration of its scheduling pass.
 */
void ath_txq_schedule_all(struct ath_softc *sc)
{
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ath_txq *txq = sc->tx.txq_map[ac];

		spin_lock_bh(&txq->axq_lock);
		ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1909 1910 1911 1912
/***********/
/* TX, DMA */
/***********/

1913
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * Handles both EDMA (FIFO-slot based) and legacy (linked-descriptor)
 * hardware.  When @internal is false the software depth counters are
 * updated and the per-frame bf_next chain is dismantled.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* free EDMA FIFO slot: hand the chain straight to it */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* link into the tail of the in-flight chain */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		/* account one depth unit per frame (not per subframe) */
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1983

F
Felix Fietkau 已提交
1984
/*
 * Send a single (non-aggregated) frame: build a one-buffer list, track
 * it in the TID's block-ack window when it is A-MPDU eligible, fill its
 * descriptor and hand it to the hardware queue.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}

	/* single-frame "chain": first == last */
	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

2007 2008 2009
/*
 * Populate the per-frame ath_frame_info (stored in the skb's driver cb
 * area) with crypto key index/type, frame length and the RTS/CTS rate
 * used later when the TX descriptor is built.
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		/* NOTE(review): falls back to the station's ps_key for data
		 * frames without a hw_key — ps_key semantics defined
		 * elsewhere in the driver; confirm before relying on this. */
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;

	/* No RTS/CTS rate: leave rtscts_rate zeroed by the memset above. */
	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

2053 2054 2055 2056
/*
 * Possibly reduce the TX chainmask for the given rate.
 *
 * APM-capable hardware on a 5 GHz channel drops from three chains (0x7)
 * to two (0x3) for rate codes below 0x90; AR9462 with Bluetooth
 * coexistence enabled uses mask 0x2 for CCK rates. In every other case
 * the caller's chainmask is returned unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
	bool apm_5ghz_full;

	apm_5ghz_full = (ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
			IS_CHAN_5GHZ(curchan) && chainmask == 0x7;

	if (apm_5ghz_full && rate < 0x90)
		return 0x3;

	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2068 2069 2070 2071
/*
 * Assign a descriptor (and sequence number if necessary),
 * and map the buffer for DMA.
 *
 * Returns the prepared ath_buf, or NULL when no buffer is available or
 * the DMA mapping fails; the skb itself is NOT freed here — the caller
 * owns it on failure.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	/* Per-TID sequence number assignment for QoS data frames. */
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		/* Preserve the fragment number of a fragmented MSDU. */
		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* All fragments of one MSDU share a sequence number; only
		 * advance it on the final fragment. */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Undo everything: detach the skb and recycle the buffer. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2124 2125
/*
 * Common TX preparation: resolve the destination node, account for the
 * crypto ICV in the frame length, optionally assign a sequence number,
 * 4-byte-align the payload after the 802.11 header, and fill the
 * per-frame info via setup_frame_info().
 *
 * Returns 0 on success or -ENOMEM when there is no headroom for the
 * alignment padding.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		/* No station: route data frames via the vif's multicast node. */
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Clear the PS filter for non-AP interfaces and non-data frames. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2179

2180 2181 2182 2183 2184 2185 2186 2187
/* Upon failure caller should free skb */
/*
 * Main mac80211 TX entry point: prepare the frame, apply per-queue flow
 * control, and either queue it in software (for aggregation or forced
 * channel-context queueing) or build a buffer and send it directly.
 *
 * Returns 0 on success (the skb is consumed, possibly freed internally on
 * buffer-setup failure) or a negative error from ath_tx_prepare(), in
 * which case the caller still owns the skb.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp = NULL;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	bool queue;
	int q;
	int ret;

	if (vif)
		avp = (void *)vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txctl->force_channel = true;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	/* Backpressure: stop the mac80211 queue when this HW queue is full. */
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	queue = ieee80211_is_data_present(hdr->frame_control);

	/* Force queueing of all frames that belong to a virtual interface on
	 * a different channel context, to ensure that they are sent on the
	 * correct channel.
	 */
	if (((avp && avp->chanctx != sc->cur_chan) ||
	     sc->cur_chan->stopped) && !txctl->force_channel) {
		/* NOTE(review): if avp is NULL here (reachable via
		 * sc->cur_chan->stopped), &avp->mcast_node is computed from a
		 * NULL base pointer — confirm callers guarantee a vif in this
		 * path. */
		if (!txctl->an)
			txctl->an = &avp->mcast_node;
		info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE;
		queue = true;
	}

	if (txctl->an && queue)
		tid = ath_get_skb_tid(sc, txctl->an, skb);

	if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE |
			   IEEE80211_TX_CTL_TX_OFFCHAN)) {
		/* PS responses and off-channel frames go via the UAPSD queue. */
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	} else if (txctl->an && queue) {
		WARN_ON(tid->ac->txq != txctl->txq);

		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(sc, txq, tid);

		ath_txq_schedule(sc, txq);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		ath_txq_skb_done(sc, txq, skb);
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	/* Timestamp PAPRD (calibration) frames for timeout handling. */
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2319
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2356 2357 2358
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2359

S
Sujith 已提交
2360
/*
 * Final per-skb completion: report ACK status to mac80211, strip the
 * header-alignment padding added on TX, update power-save state, and
 * hand the skb to the queue's completion list.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	/* Record that a frame went out while PAPRD calibration data exists. */
	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* If we were waiting for a TX ACK before sleeping, and this queue is
	 * now drained, clear the wait flag under the PM lock. */
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}
2404

S
Sujith 已提交
2405
/*
 * Complete one ath_buf: unmap its DMA buffer, dispatch the skb (normal
 * completion, PAPRD handling, or nothing in tx99 mode) and return the
 * buffer chain in @bf_q to the free pool.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;
	/* tx99 (continuous-transmit test mode) skips normal completion. */
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD calibration frame: free it if the waiter timed out,
		 * otherwise wake the calibration thread. */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2450 2451
/*
 * Translate the hardware TX status into mac80211 rate-control feedback:
 * ACK RSSI, A-MPDU length/ack counters, retry counts, and invalidation
 * of the unused trailing rate-series entries.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Mark rate-series entries after the one actually used as unused. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	/* +1: the initial attempt counts in addition to the long retries. */
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2505
/*
 * Reap completed descriptors from a legacy (non-EDMA) hardware TX queue.
 *
 * Walks the queue's buffer list, processing each completed transmit unit
 * while leaving the last DONE descriptor in place as the hardware's
 * holding descriptor (see the STALE-flag comment below).
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		/* Abort processing while a hardware reset is in progress. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_state.stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware hasn't finished this transmit unit yet. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_state.stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be recycled. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2580
void ath_tx_tasklet(struct ath_softc *sc)
2581
{
2582 2583
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2584
	int i;
2585

S
Sujith 已提交
2586 2587 2588
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2589 2590 2591
	}
}

2592 2593
/*
 * TX completion tasklet for EDMA (AR9003+) hardware: drain the global
 * TX status ring, dispatching beacon completions separately and reaping
 * completed buffers from the per-queue FIFOs.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		/* Abort processing while a hardware reset is in progress. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			ath9k_csa_update(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		/* Recycle a stale holding buffer left at the FIFO head. */
		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_state.stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* FIFO slot fully drained: advance the tail index and
			 * push any software-queued buffers to the hardware. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Keep the last buffer as the holding descriptor. */
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2670 2671 2672
/*****************/
/* Init, Cleanup */
/*****************/
2673

2674 2675 2676 2677 2678 2679
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2680 2681
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up the EDMA TX status ring and, once allocated, point the
 * hardware at it. Propagates the allocation error code on failure.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2701
/*
 * One-time TX setup: allocate the TX and beacon descriptor rings, start
 * the completion poll worker, and initialize the EDMA status ring on
 * hardware that supports it. Returns 0 or a negative errno.
 */
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		return error;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		error = ath_tx_edma_init(sc);

	return error;
}

/*
 * Initialize the per-station software TX state: one ath_atx_tid per TID
 * (sequence counters, block-ack window, software queues) and one
 * ath_atx_ac per access category, mapped to its hardware queue.
 */
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->active	   = false;
		__skb_queue_head_init(&tid->buf_q);
		__skb_queue_head_init(&tid->retry_q);
		/* Each TID belongs to exactly one access category. */
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
		ac->sched    = false;
		ac->clear_ps_filter = true;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

S
Sujith 已提交
2763
/*
 * Tear down the per-station TX state: for every TID, unschedule it (and
 * its access category) and drain any software-queued frames, all under
 * the owning hardware queue's lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Remove the TID from the scheduler list, if queued. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Likewise for the access category. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}
L
Luis R. Rodriguez 已提交
2794

2795 2796
#ifdef CONFIG_ATH9K_TX99

L
Luis R. Rodriguez 已提交
2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838
/*
 * Transmit one frame in tx99 (continuous transmit test) mode: align the
 * payload after the 802.11 header, mark the frame as unencrypted, set up
 * a buffer with a self-linked descriptor and kick the hardware.
 *
 * Returns 0 on success or -EINVAL when padding or buffer setup fails
 * (the skb is not freed here on failure).
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	/* 4-byte-align the payload after the header, as on the normal path. */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames are sent in the clear. */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Self-link the descriptor so the hardware retransmits it forever. */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}
2839 2840

#endif /* CONFIG_ATH9K_TX99 */