xmit.c 71.4 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
/* Number of HT spatial streams encoded in an MCS rate code. */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
/* Legacy/HT preamble durations in microseconds. */
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
/*
 * Arguments are parenthesized so the macros expand safely when callers
 * pass compound expressions (the original left _usec unparenthesized,
 * which mis-associates with the >> and * operators).
 */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)


38
/*
 * Data bits carried per OFDM symbol for each HT MCS (single stream),
 * indexed by [mcs % 8][bandwidth] where bandwidth is 0 = 20 MHz,
 * 1 = 40 MHz.  Multiply by the stream count for multi-stream rates.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55
				struct ath_txq *txq, struct list_head *bf_q,
56
				struct ath_tx_status *ts, int txok);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
67
					   struct sk_buff *skb);
68

69
enum {
70 71
	MCS_HT20,
	MCS_HT20_SGI,
72 73 74 75
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
81
	__acquires(&txq->axq_lock)
F
Felix Fietkau 已提交
82 83 84 85
{
	spin_lock_bh(&txq->axq_lock);
}

86
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
87
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
88 89 90 91
{
	spin_unlock_bh(&txq->axq_lock);
}

92
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
93
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
94 95 96 97 98 99 100 101 102 103 104 105
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

106 107
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_atx_tid *tid)
S
Sujith 已提交
108
{
S
Sujith 已提交
109
	struct ath_atx_ac *ac = tid->ac;
110 111 112 113 114 115
	struct list_head *list;
	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
	struct ath_chanctx *ctx = avp->chanctx;

	if (!ctx)
		return;
S
Sujith 已提交
116

S
Sujith 已提交
117 118
	if (tid->sched)
		return;
S
Sujith 已提交
119

S
Sujith 已提交
120 121
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
122

S
Sujith 已提交
123 124
	if (ac->sched)
		return;
125

S
Sujith 已提交
126
	ac->sched = true;
127 128 129

	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
	list_add_tail(&ac->list, list);
S
Sujith 已提交
130
}
131

132
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
133 134
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
135 136 137
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
138 139
}

140 141
/*
 * Ask mac80211 to transmit a BlockAckReq for this TID, moving the
 * receiver's BA window start to @seqno.  Skipped for TIDs without an
 * associated station (e.g. during teardown).
 */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	struct ath_node *an = tid->an;

	if (!an->sta)
		return;

	ieee80211_send_bar(an->vif, an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

149 150 151 152 153 154 155
/* Fill bf->rates with the rate-control selection for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

156 157 158
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
159
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
160 161
	struct ath_frame_info *fi = get_frame_info(skb);
	int q = fi->txq;
162

163
	if (q < 0)
164 165
		return;

166
	txq = sc->tx.txq_map[q];
167 168 169 170 171
	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
S
Sujith Manoharan 已提交
172
		ieee80211_wake_queue(sc->hw, info->hw_queue);
173 174 175 176
		txq->stopped = false;
	}
}

177 178 179
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
180
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
181 182 183
	return ATH_AN_2_TID(an, tidno);
}

184 185
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
186
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
187 188 189 190
}

/*
 * Pop the next frame for this TID, draining the retry queue before the
 * regular queue so retransmissions keep their ordering priority.
 */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *next = __skb_dequeue(&tid->retry_q);

	if (!next)
		next = __skb_dequeue(&tid->buf_q);

	return next;
}

200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* This frame now belongs outside any A-MPDU session */
		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		if (bf)
			continue;

		/* Allocate a descriptor (and sequence number) now */
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* No buffer available - drop the frame */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}
}

235
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
S
Sujith 已提交
236
{
237
	struct ath_txq *txq = tid->ac->txq;
238
	struct sk_buff *skb;
S
Sujith 已提交
239 240
	struct ath_buf *bf;
	struct list_head bf_head;
241
	struct ath_tx_status ts;
242
	struct ath_frame_info *fi;
243
	bool sendbar = false;
244

245
	INIT_LIST_HEAD(&bf_head);
246

247
	memset(&ts, 0, sizeof(ts));
248

249
	while ((skb = __skb_dequeue(&tid->retry_q))) {
250 251
		fi = get_frame_info(skb);
		bf = fi->bf;
F
Felix Fietkau 已提交
252
		if (!bf) {
253 254 255
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
F
Felix Fietkau 已提交
256 257
		}

258
		if (fi->baw_tracked) {
259
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
260
			sendbar = true;
261
		}
262 263 264

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
265
	}
266

267
	if (sendbar) {
F
Felix Fietkau 已提交
268
		ath_txq_unlock(sc, txq);
269
		ath_send_bar(tid, tid->seq_start);
F
Felix Fietkau 已提交
270 271
		ath_txq_lock(sc, txq);
	}
S
Sujith 已提交
272
}
273

S
Sujith 已提交
274 275
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
276
{
S
Sujith 已提交
277
	int index, cindex;
278

S
Sujith 已提交
279 280
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
281

282
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
283

284
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
285 286
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
287 288
		if (tid->bar_index >= 0)
			tid->bar_index--;
S
Sujith 已提交
289
	}
S
Sujith 已提交
290
}
291

S
Sujith 已提交
292
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
293
			     struct ath_buf *bf)
S
Sujith 已提交
294
{
295 296
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
S
Sujith 已提交
297
	int index, cindex;
S
Sujith 已提交
298

299
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
300
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
301
	__set_bit(cindex, tid->tx_buf);
302
	fi->baw_tracked = 1;
303

S
Sujith 已提交
304 305 306 307
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
308 309 310
	}
}

S
Sujith 已提交
311 312
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
313 314

{
315
	struct sk_buff *skb;
S
Sujith 已提交
316 317
	struct ath_buf *bf;
	struct list_head bf_head;
318
	struct ath_tx_status ts;
319
	struct ath_frame_info *fi;
320 321

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
322
	INIT_LIST_HEAD(&bf_head);
323

324
	while ((skb = ath_tid_dequeue(tid))) {
325 326
		fi = get_frame_info(skb);
		bf = fi->bf;
327

328 329 330 331 332
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

333
		list_add_tail(&bf->list, &bf_head);
334
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
335
	}
336 337
}

S
Sujith 已提交
338
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
339
			     struct sk_buff *skb, int count)
340
{
341
	struct ath_frame_info *fi = get_frame_info(skb);
342
	struct ath_buf *bf = fi->bf;
S
Sujith 已提交
343
	struct ieee80211_hdr *hdr;
344
	int prev = fi->retries;
345

S
Sujith 已提交
346
	TX_STAT_INC(txq->axq_qnum, a_retries);
347 348 349
	fi->retries += count;

	if (prev > 0)
350
		return;
351

S
Sujith 已提交
352 353
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
354 355
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
356 357
}

358
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
359
{
360
	struct ath_buf *bf = NULL;
S
Sujith 已提交
361 362

	spin_lock_bh(&sc->tx.txbuflock);
363 364

	if (unlikely(list_empty(&sc->tx.txbuf))) {
365 366 367
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
368 369 370 371

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
372 373
	spin_unlock_bh(&sc->tx.txbuflock);

374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391
	return bf;
}

/* Return a tx descriptor buffer to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

S
Sujith 已提交
392 393 394 395
	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
396
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
S
Sujith 已提交
397
	tbf->bf_state = bf->bf_state;
398
	tbf->bf_state.stale = false;
S
Sujith 已提交
399 400 401 402

	return tbf;
}

403 404 405 406
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
407
	struct ath_frame_info *fi;
408 409 410 411 412 413 414 415 416 417 418 419 420 421 422
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
423
		fi = get_frame_info(bf->bf_mpdu);
424
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
425 426 427 428 429 430 431 432 433 434

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
435 436
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
437
				 struct ath_tx_status *ts, int txok)
438
{
S
Sujith 已提交
439 440
	struct ath_node *an = NULL;
	struct sk_buff *skb;
441
	struct ieee80211_sta *sta;
F
Felix Fietkau 已提交
442
	struct ieee80211_hw *hw = sc->hw;
443
	struct ieee80211_hdr *hdr;
444
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
445
	struct ath_atx_tid *tid = NULL;
S
Sujith 已提交
446
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
447 448
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
449
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
450
	u32 ba[WME_BA_BMP_SIZE >> 5];
451
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
452
	bool rc_update = true, isba;
453
	struct ieee80211_tx_rate rates[4];
454
	struct ath_frame_info *fi;
455
	int nframes;
456
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
457
	int i, retries;
458
	int bar_index = -1;
459

S
Sujith 已提交
460
	skb = bf->bf_mpdu;
461 462
	hdr = (struct ieee80211_hdr *)skb->data;

463 464
	tx_info = IEEE80211_SKB_CB(skb);

465
	memcpy(rates, bf->rates, sizeof(rates));
466

467 468 469 470
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

471
	rcu_read_lock();
472

473
	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
474 475
	if (!sta) {
		rcu_read_unlock();
476

477 478 479 480
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

481
			if (!bf->bf_state.stale || bf_next != NULL)
482 483
				list_move_tail(&bf->list, &bf_head);

484
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
485 486 487

			bf = bf_next;
		}
488
		return;
489 490
	}

491
	an = (struct ath_node *)sta->drv_priv;
492
	tid = ath_get_skb_tid(sc, an, skb);
493
	seq_first = tid->seq_start;
494
	isba = ts->ts_flags & ATH9K_TX_BA;
495

496 497 498 499
	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
500 501 502
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
503
	 */
504
	if (isba && tid->tidno != ts->tid)
505 506
		txok = false;

S
Sujith 已提交
507
	isaggr = bf_isaggr(bf);
S
Sujith 已提交
508
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
509

S
Sujith 已提交
510
	if (isaggr && txok) {
511 512 513
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
S
Sujith 已提交
514
		} else {
S
Sujith 已提交
515 516 517 518 519 520 521
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
522
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
S
Sujith 已提交
523
				needreset = 1;
S
Sujith 已提交
524
		}
525 526
	}

527
	__skb_queue_head_init(&bf_pending);
528

529
	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
S
Sujith 已提交
530
	while (bf) {
531 532
		u16 seqno = bf->bf_state.seqno;

533
		txfail = txpending = sendbar = 0;
S
Sujith 已提交
534
		bf_next = bf->bf_next;
535

536 537
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
538
		fi = get_frame_info(skb);
539

540 541
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
542 543 544 545 546 547
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
S
Sujith 已提交
548 549
			/* transmit completion, subframe is
			 * acked by block ack */
550
			acked_cnt++;
S
Sujith 已提交
551 552
		} else if (!isaggr && txok) {
			/* transmit completion */
553
			acked_cnt++;
554 555 556 557 558 559 560 561
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
S
Sujith 已提交
562
		} else {
563 564 565 566
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
S
Sujith 已提交
567
		}
568

569 570 571 572
		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
573
		INIT_LIST_HEAD(&bf_head);
574
		if (bf_next != NULL || !bf_last->bf_state.stale)
S
Sujith 已提交
575
			list_move_tail(&bf->list, &bf_head);
576

577
		if (!txpending) {
S
Sujith 已提交
578 579 580 581
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
582
			ath_tx_update_baw(sc, tid, seqno);
583

584
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
585
				memcpy(tx_info->control.rates, rates, sizeof(rates));
586
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
587
				rc_update = false;
588 589 590 591
				if (bf == bf->bf_lastbf)
					ath_dynack_sample_tx_ts(sc->sc_ah,
								bf->bf_mpdu,
								ts);
592 593
			}

594
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
595
				!txfail);
S
Sujith 已提交
596
		} else {
597 598 599 600
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
S
Sujith 已提交
601
			/* retry the un-acked ones */
602
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
619
				}
620 621

				fi->bf = tbf;
S
Sujith 已提交
622 623 624 625 626 627
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
628
			__skb_queue_tail(&bf_pending, skb);
S
Sujith 已提交
629 630 631
		}

		bf = bf_next;
632 633
	}

634
	/* prepend un-acked frames to the beginning of the pending frame queue */
635
	if (!skb_queue_empty(&bf_pending)) {
636
		if (an->sleeping)
637
			ieee80211_sta_set_buffered(sta, tid->tidno, true);
638

639
		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
640
		if (!an->sleeping) {
641
			ath_tx_queue_tid(sc, txq, tid);
642

S
Sujith Manoharan 已提交
643
			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
644 645
				tid->ac->clear_ps_filter = true;
		}
646 647
	}

F
Felix Fietkau 已提交
648 649 650 651 652 653 654 655 656 657 658
	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

659 660
	rcu_read_unlock();

661 662
	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
S
Sujith 已提交
663
}
664

665 666 667 668 669 670 671 672 673 674
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
675
	struct ieee80211_tx_info *info;
676 677 678 679 680 681 682 683 684 685 686
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
687 688 689 690
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
691
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
692
			ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
693
		}
694 695 696 697
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

698
	if (!flush)
699 700 701
		ath_txq_schedule(sc, txq);
}

702 703 704 705 706 707 708 709 710 711 712
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

713 714 715 716
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

717 718 719 720 721 722 723
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
724 725
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
726
{
S
Sujith 已提交
727 728
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
729
	struct ieee80211_tx_rate *rates;
S
Sujith 已提交
730
	u32 max_4ms_framelen, frmlen;
731
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
732
	int q = tid->ac->txq->mac80211_qnum;
S
Sujith 已提交
733
	int i;
S
Sujith 已提交
734

S
Sujith 已提交
735
	skb = bf->bf_mpdu;
S
Sujith 已提交
736
	tx_info = IEEE80211_SKB_CB(skb);
737
	rates = bf->rates;
S
Sujith 已提交
738

S
Sujith 已提交
739 740
	/*
	 * Find the lowest frame length among the rate series that will have a
741
	 * 4ms (or TXOP limited) transmit duration.
S
Sujith 已提交
742 743
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
S
Sujith 已提交
744

S
Sujith 已提交
745
	for (i = 0; i < 4; i++) {
746
		int modeidx;
S
Sujith 已提交
747

748 749
		if (!rates[i].count)
			continue;
750

751 752 753
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
754
		}
755 756 757 758 759 760 761 762 763

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

764
		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
765
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
766
	}
S
Sujith 已提交
767

768
	/*
S
Sujith 已提交
769 770 771
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
772
	 */
S
Sujith 已提交
773 774
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;
775

776 777 778 779 780 781 782 783
	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;
784

785 786
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);
787

S
Sujith 已提交
788 789
	return aggr_limit;
}
790

S
Sujith 已提交
791
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

865 866
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
867
			struct ath_atx_tid *tid, struct sk_buff_head **q)
868
{
869
	struct ieee80211_tx_info *tx_info;
870
	struct ath_frame_info *fi;
871
	struct sk_buff *skb;
872
	struct ath_buf *bf;
873
	u16 seqno;
874

875
	while (1) {
876 877 878 879
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

880
		skb = skb_peek(*q);
881 882 883
		if (!skb)
			break;

884 885
		fi = get_frame_info(skb);
		bf = fi->bf;
886
		if (!fi->bf)
F
Felix Fietkau 已提交
887
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
888 889
		else
			bf->bf_state.stale = false;
890

F
Felix Fietkau 已提交
891
		if (!bf) {
892
			__skb_unlink(skb, *q);
893
			ath_txq_skb_done(sc, txq, skb);
F
Felix Fietkau 已提交
894
			ieee80211_free_txskb(sc->hw, skb);
895
			continue;
F
Felix Fietkau 已提交
896
		}
897

898 899 900 901 902
		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
903 904 905 906 907 908 909 910 911

		/*
		 * No aggregation session is running, but there may be frames
		 * from a previous session or a failed attempt in the queue.
		 * Send them out as normal data frames
		 */
		if (!tid->active)
			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

912 913 914 915 916
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

917
		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
918
		seqno = bf->bf_state.seqno;
919

S
Sujith 已提交
920
		/* do not step over block-ack window */
921
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
S
Sujith 已提交
922
			break;
923

924 925 926 927 928 929
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
930
			__skb_unlink(skb, *q);
931 932 933 934 935
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

936 937 938 939 940 941
		return bf;
	}

	return NULL;
}

942 943 944 945 946
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
947 948
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
949
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
F
Felix Fietkau 已提交
950
	int nframes = 0, ndelim;
951
	u16 aggr_limit = 0, al = 0, bpad = 0,
F
Felix Fietkau 已提交
952
	    al_delta, h_baw = tid->baw_size / 2;
953 954 955
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
956
	bool closed = false;
957

958 959
	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);
960

961
	do {
962 963 964
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

S
Sujith 已提交
965
		/* do not exceed aggregation limit */
966
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
F
Felix Fietkau 已提交
967 968
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
969
			    ath_lookup_legacy(bf) || nframes >= h_baw)
F
Felix Fietkau 已提交
970
				break;
971

F
Felix Fietkau 已提交
972
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
973 974
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
F
Felix Fietkau 已提交
975
				break;
S
Sujith 已提交
976
		}
977

S
Sujith 已提交
978
		/* add padding for previous frame to aggregation length */
S
Sujith 已提交
979
		al += bpad + al_delta;
980

S
Sujith 已提交
981 982 983 984
		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
985 986
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
S
Sujith 已提交
987
		bpad = PADBYTES(al_delta) + (ndelim << 2);
988

989
		nframes++;
S
Sujith 已提交
990
		bf->bf_next = NULL;
991

S
Sujith 已提交
992
		/* link buffers of this frame to the aggregate */
993 994
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
995
		bf->bf_state.ndelim = ndelim;
996

997
		__skb_unlink(skb, tid_q);
998
		list_add_tail(&bf->list, bf_q);
999
		if (bf_prev)
S
Sujith 已提交
1000
			bf_prev->bf_next = bf;
1001

S
Sujith 已提交
1002
		bf_prev = bf;
S
Sujith 已提交
1003

1004 1005 1006 1007 1008
		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
1009
	} while (ath_tid_has_buffered(tid));
1010

1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

1021
	*aggr_len = al;
S
Sujith 已提交
1022

1023
	return closed;
S
Sujith 已提交
1024 1025
#undef PADBYTES
}
1026

1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
/*
 * Compute the on-air duration (usec) of an HT frame.
 *
 * rix     - MCS rate index
 * pktlen  - total bytes (delims + data + fcs + pads + pad delims)
 * width   - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us short-GI symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 bit_cnt, bits_per_sym, sym_cnt, dur;
	int nss = HT_RC_2_STREAMS(rix);

	/* number of OFDM symbols needed for PLCP bits + payload */
	bit_cnt = (pktlen << 3) + OFDM_PLCP_BITS;
	bits_per_sym = bits_per_symbol[rix % 8][width] * nss;
	sym_cnt = (bit_cnt + bits_per_sym - 1) / bits_per_sym;

	dur = half_gi ? SYMBOL_TIME_HALFGI(sym_cnt) : SYMBOL_TIME(sym_cnt);

	/* add the legacy and HT training/signal field overhead */
	dur += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);

	return dur;
}

1056 1057 1058 1059 1060 1061
/*
 * Inverse of ath_pkt_duration(): the largest frame length (bytes) that
 * fits into a transmit window of 'usec' microseconds at the given MCS.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nss = HT_RC_2_STREAMS(mcs);
	int nsymbols, nbits, nbytes;

	/* subtract preamble/training overhead before counting symbols */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);

	if (sgi)
		nsymbols = TIME_SYMBOLS_HALFGI(usec);
	else
		nsymbols = TIME_SYMBOLS(usec);

	nbits = nsymbols * bits_per_symbol[mcs % 8][ht40] * nss;
	nbits -= OFDM_PLCP_BITS;

	nbytes = nbits / 8;
	/* cap at 65532, the largest value the caller's u16 tables can hold */
	if (nbytes > 65532)
		nbytes = 65532;

	return nbytes;
}

/*
 * Rebuild the per-queue maximum-aggregate-length lookup tables (one
 * entry per MCS, for HT20/HT40 with and without short GI) from the
 * queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *ht20, *ht20_sgi, *ht40, *ht40_sgi;
	int mcs;

	/* 4 ms is both the default and the maximum TXOP duration */
	if (!txop || txop > 4096)
		txop = 4096;

	ht20     = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	ht40     = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];

	for (mcs = 0; mcs < 32; mcs++) {
		ht20[mcs]     = ath_max_framelen(txop, mcs, false, false);
		ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		ht40[mcs]     = ath_max_framelen(txop, mcs, true, false);
		ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

1094
/*
 * Translate the mac80211 rate series attached to a buffer into the
 * hardware rate table in "info": per-rate try counts, RTS/CTS and
 * bandwidth/GI flags, chainmask selection and packet durations.
 * "len" is the frame (or whole aggregate) length used for duration
 * and RTS-threshold decisions; "rts" forces RTS on every rate entry.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip unused rate entries */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: bit 7 marks the rate code as HT */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC only applies to single-stream rates (MCS 0-7) */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames must go out on all chains */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1201

1202 1203 1204 1205 1206 1207 1208 1209
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1210

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1223 1224
}

1225 1226
/*
 * Walk a chain of buffers and program a hardware tx descriptor for each.
 * "len" is the total aggregate length when the chain forms an A-MPDU;
 * for a single frame it is replaced by the frame's own length below.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* link to the next descriptor; tx99 mode loops back to self */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		/* flags and rate series are set up once, on the first subframe */
		if (!bf_first) {
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		/* mark this buffer's position inside the aggregate */
		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* end of this aggregate: restart first-subframe setup */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352
/*
 * Form a short burst of at most two non-aggregated frames from the
 * tid's queue and move them onto bf_q, chained via bf_next.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *prev = NULL;
	struct ath_buf *cur = bf_first;
	int count = 0;

	for (;;) {
		struct ieee80211_tx_info *txinfo;

		/* move the frame from the tid queue onto the burst list */
		__skb_unlink(cur->bf_mpdu, tid_q);
		list_add_tail(&cur->list, bf_q);
		if (prev)
			prev->bf_next = cur;
		prev = cur;

		if (++count >= 2)
			break;

		cur = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!cur)
			break;

		/* stop the burst at the first aggregation-eligible frame */
		txinfo = IEEE80211_SKB_CB(cur->bf_mpdu);
		if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, cur);
	}
}

1353 1354
/*
 * Try to schedule one batch of frames from the given tid onto the
 * hardware queue: either an A-MPDU aggregate or a short legacy burst.
 *
 * Returns true if any frames were handed to the hardware. Sets *stop
 * when the hardware queue depth threshold is reached and scheduling
 * should pause.
 *
 * Fix: the previous version stored ath_tx_form_aggr()'s return value
 * in a local ("last") that was never read — a dead store. The call's
 * result is now explicitly ignored.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);

	/* don't push more frames once the queue is deep enough */
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
	    (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		ath_tx_form_aggr(sc, txq, tid, &bf_q, bf, tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* propagate the PS-filter clear request via the first frame */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1400 1401
/*
 * Start a tx BA (aggregation) session for the given station/TID.
 * Reports the starting sequence number back through *ssn.
 * Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/*
	 * Refresh the A-MPDU factor/density; they may have changed. This
	 * can happen in HT IBSS when a beacon carrying HT-info is received
	 * after the station has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		an->mpdudensity =
			ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	txtid->seq_start = txtid->seq_next;
	*ssn = txtid->seq_start;
	txtid->bar_index = -1;

	/* reset block-ack window tracking */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = 0;
	txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1439

1440
/* Tear down the tx aggregation session for the given station/TID. */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *node = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *atid = ATH_AN_2_TID(node, tid);
	struct ath_txq *txq = atid->ac->txq;

	ath_txq_lock(sc, txq);

	/* deactivate the session, then flush anything still queued */
	atid->active = false;
	ath_tx_flush_tid(sc, atid);
	ath_tx_tid_change_state(sc, atid);

	ath_txq_unlock_complete(sc, txq);
}
1452

1453 1454
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
1455 1456 1457 1458
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
1459
	bool buffered;
1460 1461 1462
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1463
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1464 1465 1466 1467

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1468
		ath_txq_lock(sc, txq);
1469

1470 1471 1472 1473 1474
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

1475
		buffered = ath_tid_has_buffered(tid);
1476 1477 1478 1479 1480 1481 1482 1483 1484

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

F
Felix Fietkau 已提交
1485
		ath_txq_unlock(sc, txq);
1486

1487 1488
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
1499
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1500 1501 1502 1503

		ac = tid->ac;
		txq = ac->txq;

F
Felix Fietkau 已提交
1504
		ath_txq_lock(sc, txq);
1505 1506
		ac->clear_ps_filter = true;

F
Felix Fietkau 已提交
1507
		if (ath_tid_has_buffered(tid)) {
1508
			ath_tx_queue_tid(sc, txq, tid);
1509 1510 1511
			ath_txq_schedule(sc, txq);
		}

F
Felix Fietkau 已提交
1512
		ath_txq_unlock_complete(sc, txq);
1513 1514 1515
	}
}

1516 1517
/*
 * Resume transmission on a TID: refresh the block-ack window size from
 * the station's A-MPDU factor and kick the scheduler if frames wait.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *tid = ATH_AN_2_TID(an, tidno);
	struct ath_txq *txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(sc, txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550
/*
 * mac80211 service-period callback: release up to "nframes" buffered
 * frames for the TIDs selected in the "tids" bitmap onto the U-APSD
 * queue, and flag the last released frame with EOSP (end of service
 * period).
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	/* walk the TID bitmap, lowest TID first */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			/* track in the BA window, but send unaggregated */
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* the last released frame carries the EOSP flag */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1604 1605 1606
/********************/
/* Queue Management */
/********************/
1607

S
Sujith 已提交
1608
/*
 * Allocate and initialize a hardware tx queue of the given type and
 * subtype (access category). Returns a pointer into sc->tx.txq[], or
 * NULL when the hardware cannot provide another queue of this type.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* map mac80211 access categories to hardware queue subtypes */
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* first-time setup of the software state for this queue */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1680 1681 1682
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
1683
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1684 1685 1686
	int error = 0;
	struct ath9k_tx_queue_info qi;

1687
	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
S
Sujith 已提交
1688 1689 1690 1691 1692 1693 1694 1695 1696

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1697 1698
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
S
Sujith 已提交
1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
1710
	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
S
Sujith 已提交
1711
	int qnum = sc->beacon.cabq->axq_qnum;
1712

S
Sujith 已提交
1713
	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1714

1715
	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
1716
			    ATH_CABQ_READY_TIME) / 100;
S
Sujith 已提交
1717 1718 1719
	ath_txq_update(sc, qnum, &qi);

	return 0;
1720 1721
}

1722
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1723
			       struct list_head *list)
1724
{
S
Sujith 已提交
1725 1726
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
1727 1728 1729
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
1730
	ts.ts_status = ATH9K_TX_FLUSH;
S
Sujith 已提交
1731
	INIT_LIST_HEAD(&bf_head);
1732

1733 1734
	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);
1735

1736
		if (bf->bf_state.stale) {
1737
			list_del(&bf->list);
1738

1739 1740
			ath_tx_return_buffer(sc, bf);
			continue;
S
Sujith 已提交
1741
		}
1742

S
Sujith 已提交
1743
		lastbf = bf->bf_lastbf;
1744
		list_cut_position(&bf_head, list, &lastbf->list);
1745
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
1746
	}
1747
}
1748

1749 1750 1751 1752 1753 1754
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
1755
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
1756
{
F
Felix Fietkau 已提交
1757 1758
	ath_txq_lock(sc, txq);

1759
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1760
		int idx = txq->txq_tailidx;
1761

1762
		while (!list_empty(&txq->txq_fifo[idx])) {
1763
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
1764 1765

			INCR(idx, ATH_TXFIFO_DEPTH);
1766
		}
1767
		txq->txq_tailidx = idx;
1768
	}
1769

1770 1771
	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
1772
	ath_drain_txq_list(sc, txq, &txq->axq_q);
1773

F
Felix Fietkau 已提交
1774
	ath_txq_unlock_complete(sc, txq);
1775 1776
}

1777
bool ath_drain_all_txq(struct ath_softc *sc)
1778
{
1779
	struct ath_hw *ah = sc->sc_ah;
1780
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
1781
	struct ath_txq *txq;
1782 1783
	int i;
	u32 npend = 0;
S
Sujith 已提交
1784

1785
	if (test_bit(ATH_OP_INVALID, &common->op_flags))
1786
		return true;
S
Sujith 已提交
1787

1788
	ath9k_hw_abort_tx_dma(ah);
S
Sujith 已提交
1789

1790
	/* Check if any queue remains active */
S
Sujith 已提交
1791
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1792 1793 1794
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

F
Felix Fietkau 已提交
1795 1796 1797
		if (!sc->tx.txq[i].axq_depth)
			continue;

1798 1799
		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
S
Sujith 已提交
1800 1801
	}

1802
	if (npend)
1803
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
S
Sujith 已提交
1804 1805

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1806 1807 1808 1809 1810 1811 1812 1813 1814 1815
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
1816
		ath_draintxq(sc, txq);
S
Sujith 已提交
1817
	}
1818 1819

	return !npend;
S
Sujith 已提交
1820
}
1821

S
Sujith 已提交
1822
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1823
{
S
Sujith 已提交
1824 1825
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1826
}
1827

1828
/* For each acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	struct list_head *ac_list;
	bool sent = false;

	if (txq->mac80211_qnum < 0)
		return;

	spin_lock_bh(&sc->chan_lock);
	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
	spin_unlock_bh(&sc->chan_lock);

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(ac_list))
		return;

	spin_lock_bh(&sc->chan_lock);
	rcu_read_lock();

	/* remember the current tail so a full round-robin pass is detectable */
	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
	while (!list_empty(ac_list)) {
		bool stop = false;

		/* abort when the current channel context is being stopped */
		if (sc->cur_chan->stopped)
			break;

		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(sc, txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* re-queue the AC if it still has pending TIDs */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, ac_list);
		}

		if (stop)
			break;

		/* finished one pass over all ACs; stop when nothing was sent */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(ac_list->prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
	spin_unlock_bh(&sc->chan_lock);
}
1907

1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921
/* Run the scheduler over the hardware queue of every access category. */
void ath_txq_schedule_all(struct ath_softc *sc)
{
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ath_txq *txq = sc->tx.txq_map[ac];

		spin_lock_bh(&txq->axq_lock);
		ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1922 1923 1924 1925
/***********/
/* TX, DMA */
/***********/

1926
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * "internal" skips the queue-depth accounting (used when re-queueing
 * buffers that were already counted).
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	/* EDMA path: place the chain into the next free FIFO slot */
	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		/* legacy path: append to the software list and link descriptors */
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	/* update queue depth counters, walking frame chains via bf_lastbf */
	if (!internal) {
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1996

F
Felix Fietkau 已提交
1997
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1998
			       struct ath_atx_tid *tid, struct sk_buff *skb)
S
Sujith 已提交
1999
{
2000
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2001 2002
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
2003
	struct ath_buf *bf = fi->bf;
2004 2005 2006

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
2007
	bf->bf_state.bf_type = 0;
2008 2009 2010 2011
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}
S
Sujith 已提交
2012

2013
	bf->bf_next = NULL;
S
Sujith 已提交
2014
	bf->bf_lastbf = bf;
2015
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
2016
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
S
Sujith 已提交
2017
	TX_STAT_INC(txq->axq_qnum, queued);
S
Sujith 已提交
2018 2019
}

2020 2021 2022
/*
 * Initialize the per-frame driver state (struct ath_frame_info, stored
 * in the skb's control buffer) before the frame enters the TX path:
 * hardware key index, key type, frame length including crypto overhead,
 * and the RTS/CTS protection rate.
 *
 * @sta may be NULL (e.g. for non-station-directed frames); @framelen
 * already includes FCS and ICV as computed by the caller.
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	/* Note: this wipes the mac80211 control data sharing the cb. */
	memset(fi, 0, sizeof(*fi));
	fi->txq = -1;
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		/* Fall back to the node's power-save key for data frames. */
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;

	/* No protection rate available (e.g. no rates for this band). */
	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

2067 2068 2069 2070
/*
 * Reduce the TX chainmask for certain hardware/regulatory combinations.
 *
 * Returns a possibly-narrowed chainmask: two chains (0x3) for APM-capable
 * hardware on 5 GHz when all three chains are enabled and the rate is
 * below 0x90, chainmask 0x2 for CCK rates on AR9462 with BT coexistence
 * active, and the caller's mask otherwise.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2082 2083 2084 2085
/*
 * Assign a descriptor (and a sequence number if necessary) and map the
 * frame's buffer for DMA.
 *
 * Returns NULL on failure (no free ath_buf, or DMA mapping error); in
 * that case the ath_buf is returned to the free pool but the skb is
 * NOT freed here - the caller is responsible for releasing it (see the
 * !bf path in ath_tx_start()).
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	/* Driver-assigned sequence numbers for data frames on a TID:
	 * preserve the fragment number, only advance the sequence counter
	 * once the last fragment has been assigned. */
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Clear stale references before recycling the buffer. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2138 2139
/*
 * Common TX preparation shared by ath_tx_start() and ath_tx_cabq():
 * resolve the destination node, account for crypto ICV length,
 * optionally assign a sequence number, set the PS-filter-clear flag,
 * insert MAC-header padding, and initialize the per-frame info.
 *
 * Returns 0 on success or -ENOMEM if the skb lacks headroom for the
 * alignment padding; the skb is not consumed on failure.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		/* No station: route data frames via the vif's mcast node. */
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2193

2194 2195 2196 2197 2198 2199 2200 2201
/* Main mac80211 TX entry point.  Prepares the frame, accounts per-queue
 * pending counters (stopping the mac80211 queue when full), and either
 * queues the frame in a TID software queue for later aggregation or
 * sends it out directly.  Upon failure the caller should free the skb;
 * once 0 is returned the skb is owned by the driver. */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_vif *avp = NULL;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	bool queue, skip_uapsd = false;
	int q, ret;

	if (vif)
		avp = (void *)vif->drv_priv;

	/* Off-channel frames must not be deferred to another channel. */
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txctl->force_channel = true;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	/* Flow control: stop the mac80211 queue once this hardware queue
	 * exceeds its pending-frame limit. */
	if (txq == sc->tx.txq_map[q]) {
		fi->txq = q;
		if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
		    !txq->stopped) {
			ieee80211_stop_queue(sc->hw, info->hw_queue);
			txq->stopped = true;
		}
	}

	queue = ieee80211_is_data_present(hdr->frame_control);

	/* Force queueing of all frames that belong to a virtual interface on
	 * a different channel context, to ensure that they are sent on the
	 * correct channel.
	 */
	if (((avp && avp->chanctx != sc->cur_chan) ||
	     sc->cur_chan->stopped) && !txctl->force_channel) {
		if (!txctl->an)
			txctl->an = &avp->mcast_node;
		queue = true;
		skip_uapsd = true;
	}

	if (txctl->an && queue)
		tid = ath_get_skb_tid(sc, txctl->an, skb);

	if (!skip_uapsd && (info->flags & IEEE80211_TX_CTL_PS_RESPONSE)) {
		/* PS response frames go out on the dedicated uAPSD queue;
		 * switch locks accordingly. */
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	} else if (txctl->an && queue) {
		WARN_ON(tid->ac->txq != txctl->txq);

		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(sc, txq, tid);

		ath_txq_schedule(sc, txq);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		/* Buffer setup failed: undo accounting and free the skb
		 * here (contract says return 0 == skb consumed). */
		ath_txq_skb_done(sc, txq, skb);
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
/*
 * Transmit buffered multicast/broadcast frames after a DTIM beacon on
 * the CAB (content-after-beacon) queue.  Frames are chained into one
 * burst whose total airtime is capped so it fits within a beacon slot;
 * the MOREDATA bit is cleared on the first frame once the chain is
 * final, since the burst now carries everything that was buffered.
 */
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	/* Airtime budget per beacon slot, in microseconds. */
	max_duration =
		sc->cur_chan->beacon.beacon_interval * 1000 *
		sc->cur_chan->beacon.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		/* skb is now owned by bf; clear so a later failure/exit
		 * does not free it below. */
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	/* A frame fetched but not chained (prepare/setup failure). */
	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		/* Header was modified after DMA mapping; sync it back. */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2371 2372 2373
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2374

S
Sujith 已提交
2375
/*
 * Finish a transmitted frame and stage it for return to mac80211:
 * set the ACK status flag, strip the driver-inserted header padding,
 * clear the power-save TX-ACK wait flag when the queue has drained,
 * and append the skb to the queue's completion list.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	/* Record that a frame went out; PAPRD calibration keys off this. */
	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* ps_flags is shared with the PM code; irqsave lock required. */
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}
2419

S
Sujith 已提交
2420
/*
 * Complete one MPDU: unmap its DMA buffer, dispatch the skb to the
 * appropriate completion path (PAPRD calibration, tx99 test mode, or
 * the normal mac80211 completion), and return the buffer chain in
 * @bf_q to the free pool.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;
	/* tx99 test mode: no status reporting, just recycle the buffer. */
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD training frame: wake the waiter unless the
		 * calibration attempt has already timed out. */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2465 2466
/*
 * Translate the hardware TX status into mac80211 rate-control feedback:
 * ACK RSSI, A-MPDU length/ack counts, per-rate retry counts, and the
 * underrun-as-excessive-retry heuristic described inline below.
 * @nframes/@nbad: frames in the (possibly single-frame) unit and how
 * many of them failed.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate the rate entries after the one actually used. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2520
/*
 * Reap completed descriptors from a (non-EDMA) hardware TX queue:
 * walk the queue's descriptor list, process each finished transmit
 * unit, and preserve the last completed descriptor as a holding
 * descriptor for the hardware (see the race note inside).
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		/* Abort processing while a chip reset is in flight. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_state.stale) {
			bf_held = bf;
			/* Only the holding descriptor left: nothing new. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware has not finished this unit yet. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_state.stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be recycled. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2595
void ath_tx_tasklet(struct ath_softc *sc)
2596
{
2597 2598
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2599
	int i;
2600

S
Sujith 已提交
2601 2602 2603
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2604 2605 2606
	}
}

2607 2608
/*
 * TX completion tasklet for EDMA (AR93xx-style) hardware: drain the
 * global TX status ring, dispatch beacon completions separately, and
 * reap finished transmit units from the per-queue FIFO lists.  When a
 * FIFO slot empties, any software-queued buffers waiting in axq_q are
 * pushed into the freed slot.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		/* Abort processing while a chip reset is in flight. */
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			if (ath9k_is_chanctx_enabled()) {
				ath_chanctx_event(sc, NULL,
						  ATH_CHANCTX_EVENT_BEACON_SENT);
			}

			ath9k_csa_update(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);
		/* Drop a leftover holding descriptor from a prior pass. */
		if (bf->bf_state.stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* Entire FIFO slot completed: advance the tail and
			 * refill the slot from the software queue if any. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Keep the last descriptor as the holding one. */
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2690 2691 2692
/*****************/
/* Init, Cleanup */
/*****************/
2693

2694 2695 2696 2697 2698 2699
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2700 2701
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up EDMA transmit support: allocate the status ring and point the
 * hardware at it.  Returns the allocation error code, or 0 on success.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (err)
		return err;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2721
int ath_tx_init(struct ath_softc *sc, int nbufs)
2722
{
2723
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2724
	int error = 0;
2725

2726
	spin_lock_init(&sc->tx.txbuflock);
2727

2728
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2729
				  "tx", nbufs, 1, 1);
2730
	if (error != 0) {
2731 2732
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2733
		return error;
2734
	}
2735

2736
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2737
				  "beacon", ATH_BCBUF, 1, 1);
2738
	if (error != 0) {
2739 2740
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2741
		return error;
2742
	}
2743

2744 2745
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2746
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2747
		error = ath_tx_edma_init(sc);
2748

S
Sujith 已提交
2749
	return error;
2750 2751 2752 2753
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2754 2755 2756
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2757

2758
	for (tidno = 0, tid = &an->tid[tidno];
2759
	     tidno < IEEE80211_NUM_TIDS;
2760 2761 2762 2763 2764 2765 2766
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
2767
		tid->active	   = false;
2768
		__skb_queue_head_init(&tid->buf_q);
2769
		__skb_queue_head_init(&tid->retry_q);
2770
		acno = TID_TO_WME_AC(tidno);
2771
		tid->ac = &an->ac[acno];
2772
	}
2773

2774
	for (acno = 0, ac = &an->ac[acno];
2775
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2776
		ac->sched    = false;
2777
		ac->clear_ps_filter = true;
2778
		ac->txq = sc->tx.txq_map[acno];
2779
		INIT_LIST_HEAD(&ac->tid_q);
2780 2781 2782
	}
}

S
Sujith 已提交
2783
/*
 * Tear down a node's TX state when the station goes away: for every
 * TID, unschedule it (and its access category) and drain any frames
 * still sitting in the software queues, each under the owning hardware
 * queue's lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Remove the TID from the scheduler's pending list. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}
L
Luis R. Rodriguez 已提交
2814

2815 2816
#ifdef CONFIG_ATH9K_TX99

L
Luis R. Rodriguez 已提交
2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858
/*
 * Transmit a frame in tx99 (continuous transmit test) mode.
 *
 * Mirrors the padding step of ath_tx_prepare(), marks the frame as
 * unencrypted, sets up a self-linked descriptor so the hardware
 * retransmits it continuously, and starts the queue.
 *
 * Returns 0 on success or -EINVAL if padding or buffer setup fails;
 * the skb is not consumed on failure.
 *
 * Fix: the error return inside the headroom check was mis-indented at
 * the outer level, making it read as if it executed unconditionally
 * after the if-block (goto-fail-style hazard); it is now indented to
 * match its actual scope.
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	/* Insert MAC-header alignment padding, as ath_tx_prepare() does. */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames go out unencrypted. */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Self-linked descriptor: hardware loops on this frame. */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}
2859 2860

#endif /* CONFIG_ATH9K_TX99 */